diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/README.md b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/README.md
deleted file mode 100644
index e1917c6dc153a0aff2ab1e0ec5093cc55b5b77e1..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-### Example: `you` (use like openai pypi package)
-
-```python
-
-from gpt4free import you
-
-# simple request with links and details
-response = you.Completion.create(
-    prompt="hello world",
-    detailed=True,
-    include_links=True, )
-
-print(response.dict())
-
-# {
-#     "response": "...",
-#     "links": [...],
-#     "extra": {...},
-#     "slots": {...}
-#     }
-# }
-
-# chatbot
-
-chat = []
-
-while True:
-    prompt = input("You: ")
-    if prompt == 'q':
-        break
-    response = you.Completion.create(
-        prompt=prompt,
-        chat=chat)
-
-    print("Bot:", response.text)
-
-    chat.append({"question": prompt, "answer": response.text})
-```
diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/__ini__.py b/spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/__ini__.py
deleted file mode 100644
index 1e4fd149dd2371c54989bf3b6e034fd60e156213..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/__ini__.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Import necessary libraries
-import asyncio
-from json import dumps, loads
-from ssl import create_default_context
-
-import websockets
-from browser_cookie3 import edge
-from certifi import where
-from requests import get
-
-# Set up SSL context
-ssl_context = create_default_context()
-ssl_context.load_verify_locations(where())
-
-
-def format(msg: dict) -> str:
-    """Format message as JSON string with delimiter."""
-    return dumps(msg) + '\x1e'
-
-
-def get_token():
-    """Retrieve token from browser cookies."""
-    cookies = {c.name: c.value for c in edge(domain_name='bing.com')}
-    return cookies['_U']
-
-
-class AsyncCompletion:
-    async def create(
-            prompt: str = 'hello world',
-            optionSets: list = [
-                'deepleo',
-                'enable_debug_commands',
-                'disable_emoji_spoken_text',
-                'enablemm',
-                'h3relaxedimg'
-            ],
-            token: str = get_token()):
-        """Create a connection to Bing AI and send the prompt."""
-
-        # Send create request
-        create = get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
-                     headers={
-                         'host': 'edgeservices.bing.com',
-                         'authority': 'edgeservices.bing.com',
-                         'cookie': f'_U={token}',
-                         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
-                     }
-                     )
-
-        # Extract conversation data
-        conversationId = create.json()['conversationId']
-        clientId = create.json()['clientId']
-        conversationSignature = create.json()['conversationSignature']
-
-        # Connect to WebSocket
-        wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size=None, ssl=ssl_context,
-                                       extra_headers={
-                                           # Add necessary headers
-                                       }
-                                       )
-
-        # Send JSON protocol version
-        await wss.send(format({'protocol': 'json', 'version': 1}))
-        await wss.recv()
-
-        # Define message structure
-        struct = {
-            # Add necessary message structure
-        }
-
-        # Send message
-        await wss.send(format(struct))
-
-        # Process responses
-        base_string = ''
-        final = False
-        while not final:
-            objects = str(await wss.recv()).split('\x1e')
-            for obj in objects:
-                if obj is None or obj == '':
-                    continue
-
-                response = loads(obj)
-                if response.get('type') == 1 and response['arguments'][0].get('messages', ):
-                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get(
-                        'text')
-
-                    yield (response_text.replace(base_string, ''))
-                    base_string = response_text
-
-                elif response.get('type') == 2:
-                    final = True
-
-        await wss.close()
-
-
-async def run():
-    """Run the async completion and print the result."""
-    async for value in AsyncCompletion.create(
-            prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z',
-            optionSets=[
-                "galileo",
-            ]
-    ):
-        print(value, end='', flush=True)
-
-
-asyncio.run(run())
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Visual Studio 2015 for Windows and Become a Master of Multiple Platforms.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Visual Studio 2015 for Windows and Become a Master of Multiple Platforms.md
deleted file mode 100644
index 235e9f3b428ee6bfc90357c490281ed33a5903ee..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Visual Studio 2015 for Windows and Become a Master of Multiple Platforms.md
+++ /dev/null
@@ -1,32 +0,0 @@
-

How to Download Visual Studio 2015 for Windows and What's New in It

-

Visual Studio 2015 is a powerful and versatile integrated development environment (IDE) that allows you to create applications for various platforms, such as Windows, web, mobile, cloud, and more. It supports multiple programming languages, such as C#, C++, Visual Basic, Python, JavaScript, and more. It also offers many features and tools to help you code faster, debug easier, test better, and collaborate more efficiently.

-

download visual studio 2015 for windows


Download Zip - https://byltly.com/2uKwD3



-

In this article, we will show you how to download Visual Studio 2015 for Windows and what's new in it.

- -

How to Download Visual Studio 2015 for Windows

-

Downloading Visual Studio 2015 for Windows is very simple and fast. Just follow these steps:

-
  1. Go to the official website of Visual Studio 2015: https://visualstudio.microsoft.com/vs/older-downloads/
  2. Scroll down to the "Visual Studio 2015" section and click the "Download" button next to the edition you want. You can choose from the Community, Professional, or Enterprise editions. The Community edition is free for individual developers, open-source projects, academic research, education, and small teams; the Professional and Enterprise editions require a subscription or a trial license.
  3. Save the setup file on your PC and run it.
  4. Follow the instructions to install Visual Studio 2015 on your PC. You can customize the installation by selecting the features and components you want.
  5. Launch Visual Studio 2015 and sign in with your Microsoft account if prompted.
- -

What's New in Visual Studio 2015

-

Visual Studio 2015 introduces many new features and improvements that make it easier and more productive to develop applications for various platforms. Here are some of the highlights:

- Support for multiple programming languages and cross-platform development (Windows, web, mobile, and cloud)
- Improved debugging and diagnostics tools
- Enhanced testing and quality features
- Better collaboration and DevOps support

Conclusion

-

Visual Studio 2015 is a great IDE that offers many features and tools to help you create amazing applications for various platforms. It supports multiple programming languages, cross-platform development, improved debugging and diagnostics, enhanced testing and quality, better collaboration and DevOps, and more.

-

-

If you want to download Visual Studio 2015 for Windows and try it for yourself, just follow the steps above.

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Borderlands 2 Crack Only-SKIDROW Cheat Engine LINK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Borderlands 2 Crack Only-SKIDROW Cheat Engine LINK.md deleted file mode 100644 index 5830dfb7dc2a3ebd96a9c7863723b6b9008592e7..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Borderlands 2 Crack Only-SKIDROW Cheat Engine LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

Borderlands 2 Crack Only-SKIDROW Cheat Engine


Download Ziphttps://imgfil.com/2uxYlQ



-
- d5da3c52bf
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cricket Batting Tips In Tamil Pdf 26 __EXCLUSIVE__.md b/spaces/1gistliPinn/ChatGPT4/Examples/Cricket Batting Tips In Tamil Pdf 26 __EXCLUSIVE__.md deleted file mode 100644 index cf99aeb1e81a4b15ad1b9f2d8d66e73e3efa9454..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Cricket Batting Tips In Tamil Pdf 26 __EXCLUSIVE__.md +++ /dev/null @@ -1,6 +0,0 @@ -

Cricket Batting Tips In Tamil Pdf 26


Download ✸✸✸ https://imgfil.com/2uxYxN



- -The West Indies Cricket Umpires' Association (WICUA) will be holding its 29 th ... Passover Lamb Preparation, Audi R10 2019 , Hatch Meaning In Tamil, DDCA ... dates & notification, application process, exam level‎, mode‎, preparation tips, ... 2019 - Download 2014 asa umpires test answers PDF Full Ebook online right ... 4d29de3e1b
-
-
-

diff --git a/spaces/1phancelerku/anime-remove-background/Como baixar Solar Smash APK e jogar o simulador de destruio do planeta mais divertido.md b/spaces/1phancelerku/anime-remove-background/Como baixar Solar Smash APK e jogar o simulador de destruio do planeta mais divertido.md deleted file mode 100644 index 6c552e9c5ed6d952285623853a74ae89fd70891b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Como baixar Solar Smash APK e jogar o simulador de destruio do planeta mais divertido.md +++ /dev/null @@ -1,148 +0,0 @@ - -

Download Solar Smash APK: A Planet Destruction Simulator

-

Have you ever imagined what it would be like to destroy an entire planet with a single tap on your phone screen? If you are fascinated by space and the possibilities for exploration and destruction it offers, then you need to know Solar Smash, a simulation game that lets you use a variety of weapons and disasters to annihilate planets and solar systems. In this article, we will show you what Solar Smash is, how to download the game's APK on your Android device, what the game's main features are, and which tips and tricks make playing Solar Smash more fun and efficient. Let's go!

-

What is Solar Smash?

-

A simulation game that lets you destroy planets with a variety of weapons

-

Solar Smash is a simulation game developed by Paradyme Games that lets you use a variety of different weapons to destroy the planet of your choice. These weapons include nuclear missiles, lasers, asteroids, alien invasions, black holes, solar flares, and much more. You can combine weapons with one another to create spectacular reactions and watch the planet break into pieces. You can also customize your weapons and your planets, changing their color, size, speed, gravity, and other properties.

-

download solar smash apk


DOWNLOAD ————— https://jinyurl.com/2uNKP1



-

A free and fun game for space lovers

-

Best of all, Solar Smash is completely free to play on your Android device. You don't have to pay anything to download the game's APK or to access its features, and you won't be bothered by intrusive ads or in-app purchases. Solar Smash is an ideal game for anyone who loves space and wants to have fun with the simulation and destruction possibilities it offers. You can spend hours playing with the different weapons and planets, experimenting with different interactions and outcomes.

-

A game with realistic graphics and NASA imagery

-

Another strong point of Solar Smash is its graphical quality. The game has realistic, detailed graphics that render the planets and weapons faithfully. You can see the damage the weapons cause to the planets, such as craters, cracks, fire, smoke, and even the complete destruction of the core. In addition, the game uses real NASA imagery to represent the planets and solar systems, which makes it even more realistic and immersive. You can see Earth, Mars, Jupiter, Saturn, and other planets as they really are, and also explore secret planets that the game hides.

-

How to download Solar Smash APK?

-

Step by step to download the game on your Android device

-

Downloading the Solar Smash APK on your Android device is quick and easy. Just follow these steps:

-
  1. Go to the official Solar Smash website or a trusted site that offers the game's APK download.
  2. Click the download button and wait for the APK file to download to your device.
  3. Locate the APK file in your device's downloads folder and tap it to start the installation.
  4. If prompted, allow installation from unknown sources in your device's settings.
  5. Follow the on-screen instructions to finish installing the game.
  6. Done! Now you can open the game and start destroying planets.
-

Minimum requirements and recommendations to run the game

-

To run Solar Smash on your Android device, you need at least version 4.4 of the operating system and at least 100 MB of free space in your device's storage. The game does not require an internet connection to work, but it is recommended that you stay connected to receive updates and news. The game also runs best on devices with good processing power and a good screen resolution, so you can make the most of its graphics and effects.

-

Alternatives to download the game on your PC or Mac

-

If you prefer to play Solar Smash on your PC or Mac, you can do so with the help of an Android emulator. An emulator is a program that simulates an Android device on your computer, letting you install and run apps and games that are exclusive to that system. Several emulators are available online; some of the most popular are BlueStacks, NoxPlayer, and MEmu. To download Solar Smash on your PC or Mac using an emulator, follow the steps below:

-
  1. Download and install an Android emulator on your PC or Mac.
  2. Open the emulator and sign in with your Google account.
  3. Access the Google Play Store or a trusted site that offers the Solar Smash APK download.
  4. Download and install Solar Smash in the emulator.
  5. Done! Now you can play Solar Smash on your PC or Mac.
-

What are the main features of Solar Smash?

Game modes: Planet Smash and System Smash

-

Solar Smash has two main game modes: Planet Smash and System Smash. In Planet Smash, you pick a specific planet to destroy with whatever weapons you want. You can view the planet from different angles and zoom levels, and follow the damage your actions cause. You can also change the planet's properties, such as its color, size, gravity, and atmosphere. In System Smash, you pick an entire solar system to destroy with whatever weapons you want. You can watch the planets orbiting the sun and interact with them in various ways. You can also change the solar system's properties, such as the distance between the planets, the orbital speed, and the brightness of the sun.

-


-

Weapons and disasters: missiles, lasers, asteroids, and more

-

Solar Smash has a wide variety of weapons and disasters you can use to destroy planets and solar systems. These weapons and disasters include:

- Nuclear missiles and lasers
- Asteroids and alien invasions
- Black holes and solar flares, among others

Planets and systems: Earth, Mars, Jupiter, and others

-

Solar Smash has several planets and solar systems you can choose to destroy. They are based on real NASA imagery, which makes the game more realistic and interesting. You can see the planets as they really are, with their continents, oceans, clouds, rings, moons, and so on. You can also see the solar systems as they really are, with their planets orbiting the sun at different speeds and distances. Some of the planets and solar systems you can choose are:

- Earth, Mars, Jupiter, Saturn, and other planets
- Entire solar systems with their planets orbiting the sun

Secret planets: how to unlock them and what they are

-

Besides the well-known planets and solar systems, Solar Smash also has some secret planets you can unlock and destroy. These secret planets are based on pop-culture references, such as movies, series, games, and books. To unlock the secret planets, you need to follow hints and clues the game gives you. For example, to unlock the planet Tatooine, from the Star Wars saga, you use the green laser weapon on Mars. To unlock the planet Namekusei, from the Dragon Ball Z series, you use the solar flare weapon on Jupiter. Some of the secret planets you can unlock are:

- Tatooine, from the Star Wars saga
- Namekusei, from the Dragon Ball Z series

What are the tips and tricks for playing Solar Smash?

How to complete the game's challenges and achievements

-

Solar Smash has a series of challenges and achievements you can complete to earn rewards and unlock new weapons and planets. These challenges and achievements are based on different goals, such as destroying a certain number of planets, using a certain weapon, or causing a certain type of damage. To see the available challenges and achievements, open the game menu and click the trophy icon; there you can track your progress and your rewards. Some examples of challenges and achievements are:

- -

How to hit the right spot to destroy the planet's core

-

One of the most efficient and satisfying ways to destroy a planet in Solar Smash is to hit its core with a powerful weapon, such as a laser or a missile. This causes a huge explosion that shatters the planet into many fragments. Hitting the planet's core, however, is not as easy as it looks: you need good aim and good precision to strike the right spot. One tip that makes this easier is to zoom in on the planet to get a better view of its center. Another is to use the X-ray weapon to see through the planet and locate its core. That way, you can aim with more confidence and hit the target more easily.

-

How to combine weapons and elements to create incredible reactions

-

Another way to have fun in Solar Smash is to combine the different weapons and elements to create incredible, unexpected reactions. You can use your creativity and curiosity to experiment with the many possibilities the game offers. For example, you can use the rain weapon to soak a planet and then use the lightning weapon to deliver an electric shock. Or you can use the ice weapon to freeze a planet and then use the fire weapon to trigger a thermal explosion. Or you can use the gravity weapon to pull several asteroids toward a planet and then use the black hole weapon to swallow them all. The combinations are endless and may surprise you with their results.

-

How to customize your weapons and your planets

-

Solar Smash also lets you customize your weapons and your planets, changing aspects such as color, size, speed, and gravity. This can make the game more fun and more challenging, since you can create different scenarios and test your limits. To customize your weapons and planets, open the game menu and click the gear icon; there you can see the options available for each weapon and planet and adjust them to your liking. For example, you can change the color of your laser, the size of your asteroid, the speed of your missile, or the gravity of your planet.

-

Conclusion

-

Solar Smash is a simulation game that lets you destroy planets and solar systems with a variety of weapons and disasters. The game is free, fun, realistic, and addictive. You can download the game's APK on your Android device, or on your PC or Mac with an emulator. You can also choose among several well-known or secret planets and solar systems, and customize your weapons and your planets. You can also complete challenges and achievements, hit planetary cores, combine weapons and elements, and create incredible reactions. Solar Smash is a game that will give you hours of fun and destruction. Download the Solar Smash APK now and start your space adventure!

-

FAQs

-

What is Solar Smash?

-

Solar Smash is a simulation game that lets you destroy planets and solar systems with a variety of weapons and disasters.

-

How do I download the Solar Smash APK?

-

You can download the Solar Smash APK on your Android device, or on your PC or Mac with an emulator. Access a trusted site that offers the game's APK download, download the file, and install it on your device.

-

What are the main features of Solar Smash?

-

Solar Smash has two game modes: Planet Smash and System Smash. You can choose among several well-known or secret planets and solar systems, and customize your weapons and your planets. You can also complete challenges and achievements, hit planetary cores, combine weapons and elements, and create incredible reactions.

-

What are the best weapons for destroying planets?

-

That depends on your taste and your goal. Some of the most powerful and fun weapons are lasers, nuclear missiles, asteroids, black holes, and solar flares.

-

How do I unlock the secret planets?

-

You need to follow hints and clues the game gives you. For example, to unlock the planet Tatooine, from the Star Wars saga, you use the green laser weapon on Mars. To unlock the planet Namekusei, from the Dragon Ball Z series, you use the solar flare weapon on Jupiter.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/801artistry/RVC801/utils/backups_test.py b/spaces/801artistry/RVC801/utils/backups_test.py deleted file mode 100644 index f3edf15811b5035ee82f21e54e87b7e87ce413eb..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/utils/backups_test.py +++ /dev/null @@ -1,138 +0,0 @@ - -import os -import shutil -import hashlib -import time - -LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' -WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' -GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' - -def import_google_drive_backup(): - print("Importing Google Drive backup...") - GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' # change this to your Google Drive path - LOGS_FOLDER = '/content/Applio-RVC-Fork/logs' - WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights' - weights_exist = False - files_to_copy = [] - weights_to_copy = [] - - def handle_files(root, files, is_weight_files=False): - for filename in files: - filepath = os.path.join(root, filename) - if filename.endswith('.pth') and is_weight_files: - weights_exist = True - backup_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - else: - backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH)) - backup_folderpath = os.path.dirname(backup_filepath) - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created folder: {backup_folderpath}', flush=True) - if is_weight_files: - weights_to_copy.append((filepath, backup_filepath)) - else: - files_to_copy.append((filepath, backup_filepath)) - - for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'logs')): - handle_files(root, files) - - for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'weights')): - handle_files(root, files, True) - - # Copy files in batches - total_files = len(files_to_copy) - start_time = time.time() - for i, (source, dest) in enumerate(files_to_copy, start=1): - with open(source, 'rb') as src, open(dest, 'wb') as dst: - shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size - # Report progress every 5 seconds or after every 100 files, whichever is less frequent - if time.time() - start_time > 5 or i % 100 == 0: - print(f'\rCopying file {i} of {total_files} ({i * 100 / total_files:.2f}%)', end="") - start_time = time.time() - print(f'\nImported {len(files_to_copy)} files from Google Drive backup') - - # Copy weights in batches - total_weights = len(weights_to_copy) - start_time = time.time() - for i, (source, dest) in enumerate(weights_to_copy, start=1): - with open(source, 'rb') as src, open(dest, 'wb') as dst: - shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size - # Report progress every 5 seconds or after every 100 files, whichever is less frequent - if time.time() - start_time > 5 or i % 100 == 0: - print(f'\rCopying weight file {i} of {total_weights} ({i * 100 / total_weights:.2f}%)', end="") - start_time = time.time() - if weights_exist: - print(f'\nImported {len(weights_to_copy)} weight files') - print("Copied weights from Google Drive backup to local weights folder.") - else: - print("\nNo weights found in Google Drive backup.") - print("Google Drive backup import completed.") - -def backup_files(): - print("\n Starting backup loop...") - last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt') - fully_updated = False # boolean to track if all files are up to date - try: - with open(last_backup_timestamps_path, 'r') as f: - 
last_backup_timestamps = dict(line.strip().split(':') for line in f) - except: - last_backup_timestamps = {} - - while True: - updated = False - files_to_copy = [] - files_to_delete = [] - - for root, dirs, files in os.walk(LOGS_FOLDER): - for filename in files: - if filename != 'last_backup_timestamps.txt': - filepath = os.path.join(root, filename) - if os.path.isfile(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - backup_folderpath = os.path.dirname(backup_filepath) - - if not os.path.exists(backup_folderpath): - os.makedirs(backup_folderpath) - print(f'Created backup folder: {backup_folderpath}', flush=True) - - # check if file has changed since last backup - last_backup_timestamp = last_backup_timestamps.get(filepath) - current_timestamp = os.path.getmtime(filepath) - if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp: - files_to_copy.append((filepath, backup_filepath)) # add to list of files to copy - last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp - updated = True - fully_updated = False # if a file is updated, all files are not up to date - - # check if any files were deleted in Colab and delete them from the backup drive - for filepath in list(last_backup_timestamps.keys()): - if not os.path.exists(filepath): - backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER)) - if os.path.exists(backup_filepath): - files_to_delete.append(backup_filepath) # add to list of files to delete - del last_backup_timestamps[filepath] - updated = True - fully_updated = False # if a file is deleted, all files are not up to date - - # Copy files in batches - if files_to_copy: - for source, dest in files_to_copy: - shutil.copy2(source, dest) - print(f'Copied or updated {len(files_to_copy)} files') - - # Delete files in batches - if files_to_delete: - for file in files_to_delete: - os.remove(file) - print(f'Deleted {len(files_to_delete)} files') - - if not updated and not fully_updated: - print("Files are up to date.") - fully_updated = True # if all files are up to date, set the boolean to True - copy_weights_folder_to_drive() - - with open(last_backup_timestamps_path, 'w') as f: - for filepath, timestamp in last_backup_timestamps.items(): - f.write(f'{filepath}:{timestamp}\n') - time.sleep(15) # wait for 15 seconds before checking again diff --git a/spaces/AFRAC/NCM_DEMO/app.py b/spaces/AFRAC/NCM_DEMO/app.py deleted file mode 100644 index 9298bd93cdd66edf35ab0313391514b65078c0dd..0000000000000000000000000000000000000000 --- a/spaces/AFRAC/NCM_DEMO/app.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -import subprocess - -# Define the command you want to run -command = 'pip install --no-cache-dir gradio==3.26.0' - -# Use subprocess to run the command -try: - subprocess.check_call(command, shell=True) - print("Installation successful!") -except subprocess.CalledProcessError as e: - print(f"Installation failed with error: {e}") - -import pandas as pd -import numpy as np -import tensorflow as tf -from tensorflow.keras.preprocessing.text import Tokenizer -from sklearn.preprocessing import OneHotEncoder -import gradio as gr -from gradio import components - -print("\n\n\n****************************>>>>>>>>> GRADIO VERSION: ",gr.__version__,"\n\n\n") - -model = tf.keras.models.load_model("NCM_DEMO.H5", compile=False) - -ncm_table = 
pd.read_csv("https://raw.githubusercontent.com/mfilipak/AFRAC_IA/main/DATASET/TABELA_NCM.CSV", index_col="CODIGO") -valid_ncms = sorted(ncm_table[ncm_table.index > 1000000].index) -ncmst = np.array(valid_ncms) -ncmst = ncmst.reshape([-1,1]) -ohe = OneHotEncoder() -ohe.fit(ncmst) - -tk = Tokenizer(num_words=None, char_level=True, oov_token='UNK') -tk.word_index = {'UNK': 1, ' ': 2, 'a': 3, 'o': 4, 'e': 5, 'r': 6, 'i': 7, 'c': 8, 'l': 9, 's': 10, 't': 11, 'n': 12, 'm': 13, '0': 14, 'p': 15, 'g': 16, 'd': 17, 'u': 18, 'b': 19, '1': 20, 'f': 21, 'h': 22, '2': 23, '5': 24, 'v': 25, '3': 26, 'k': 27, '4': 28, '.': 29, 'x': 30, '6': 31, '8': 32, '-': 33, '7': 34, '9': 35, 'j': 36, 'z': 37, '/': 38, 'y': 39, 'q': 40, 'w': 41, ',': 42, ':': 43, '(': 44, ')': 45, '_': 46, '#': 47, '+': 48, '*': 49, '%': 50, '"': 51, "'": 52, 'ç': 53, '&': 54, 'ã': 55, ';': 56, ']': 57, '[': 58, '$': 59, 'á': 60, '\\': 61, '|': 62, 'é': 63, 'º': 64, 'ó': 65, '!': 66, '=': 67, 'í': 68, 'ê': 69, '?': 70, '>': 71, '@': 72, '¿': 73, '°': 74, 'ú': 75, '\xa0': 76, 'ô': 77, 'â': 78, '`': 79, 'à': 80, 'õ': 81, 'ï': 82, 'ª': 83, '²': 84, '{': 85, '<': 86, '~': 87, 'è': 88, '§': 89, 'ø': 90, 'ñ': 91, '³': 92, 'û': 93, 'ù': 94, '\xad': 95, '}': 96, '\x81': 97, 'ä': 98, 'ü': 99, '¶': 100, '^': 101, '€': 102, '¹': 103, 'µ': 104, '®': 105, '¡': 106} - -def PredictNCM(txt): - x = [txt[:120].lower() ] - - print(txt) - - X = np.array(tk.texts_to_sequences([_+(120-len(_))*" " for _ in x])) - pred = model.predict(X, verbose=0)[0] - aux = np.argsort(pred)[::-1][:5] - return {f"{int(valid_ncms[i]):08}":float(pred[i]) for i in aux}, ncm_table.loc[valid_ncms[aux[0]],"DESCRICAO"] - - -demo = gr.Interface(fn=PredictNCM, outputs=[components.Label(label="NCMs"), components.Textbox(label="Descrição do NCM")], title='AFRAC NOTA CERTA', - inputs=components.Textbox(label="DESCRIÇÃO"), - examples=["Coca-Cola PET 2l","Pepsi 500ml", "Guaraná Antarctica 2l", "Ração Bocão Premium","Mentos Kiss Morango", "Bombom Sonho de Valsa"]) -demo.launch() -#display(demo.launch(share=True)) -#demo.close() - diff --git a/spaces/AIFILMS/StyleGANEX/utils/common.py b/spaces/AIFILMS/StyleGANEX/utils/common.py deleted file mode 100644 index 4813fe311ee40720697e4862c5fbfad811d39237..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/utils/common.py +++ /dev/null @@ -1,87 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image -import matplotlib.pyplot as plt - - -# Log images -def log_input_image(x, opts): - if opts.label_nc == 0: - return tensor2im(x) - elif opts.label_nc == 1: - return tensor2sketch(x) - else: - return tensor2map(x) - - -def tensor2im(var): - var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy() - var = ((var + 1) / 2) - var[var < 0] = 0 - var[var > 1] = 1 - var = var * 255 - return Image.fromarray(var.astype('uint8')) - - -def tensor2map(var): - mask = np.argmax(var.data.cpu().numpy(), axis=0) - colors = get_colors() - mask_image = np.ones(shape=(mask.shape[0], mask.shape[1], 3)) - for class_idx in np.unique(mask): - mask_image[mask == class_idx] = colors[class_idx] - mask_image = mask_image.astype('uint8') - return Image.fromarray(mask_image) - - -def tensor2sketch(var): - im = var[0].cpu().detach().numpy() - im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) - im = (im * 255).astype(np.uint8) - return Image.fromarray(im) - - -# Visualization utils -def get_colors(): - # currently support up to 19 classes (for the celebs-hq-mask dataset) - colors = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], 
[204, 0, 204], [0, 255, 255], - [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], - [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]] - return colors - - -def vis_faces(log_hooks): - display_count = len(log_hooks) - fig = plt.figure(figsize=(8, 4 * display_count)) - gs = fig.add_gridspec(display_count, 3) - for i in range(display_count): - hooks_dict = log_hooks[i] - fig.add_subplot(gs[i, 0]) - if 'diff_input' in hooks_dict: - vis_faces_with_id(hooks_dict, fig, gs, i) - else: - vis_faces_no_id(hooks_dict, fig, gs, i) - plt.tight_layout() - return fig - - -def vis_faces_with_id(hooks_dict, fig, gs, i): - plt.imshow(hooks_dict['input_face']) - plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input']))) - fig.add_subplot(gs[i, 1]) - plt.imshow(hooks_dict['target_face']) - plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']), - float(hooks_dict['diff_target']))) - fig.add_subplot(gs[i, 2]) - plt.imshow(hooks_dict['output_face']) - plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target']))) - - -def vis_faces_no_id(hooks_dict, fig, gs, i): - plt.imshow(hooks_dict['input_face'], cmap="gray") - plt.title('Input') - fig.add_subplot(gs[i, 1]) - plt.imshow(hooks_dict['target_face']) - plt.title('Target') - fig.add_subplot(gs[i, 2]) - plt.imshow(hooks_dict['output_face']) - plt.title('Output') diff --git a/spaces/AIatUIUC/CodeLATS/app.py b/spaces/AIatUIUC/CodeLATS/app.py deleted file mode 100644 index c92b9453e7ab65bb8fdddbdb03369535efc1584c..0000000000000000000000000000000000000000 --- a/spaces/AIatUIUC/CodeLATS/app.py +++ /dev/null @@ -1,109 +0,0 @@ -import streamlit as st -import openai -import os -import sys -import argparse -sys.path.append('./lats') -from lats_main import lats_main - -st.set_page_config(layout="wide") - -# Initialize session state variables if they don't exist. -if 'response_content' not in st.session_state: - st.session_state.response_content = None - -# Creating main columns for the chat and runtime notifications -chat_col = st.container() - -chat_col.title("CodeLATS") -description = """This tech demo is an implementation of Language Agent Tree Search (LATS) (https://arxiv.org/abs/2310.04406) built specifically for generating code in the form of python functions. It achieves :green[**state-of-the-art**] results on HumanEval with a :green[**94.4% pass@1 rate**] on GPT-4. - -Listed below is an example programming problem (https://leetcode.com/problems/longest-valid-parentheses/description/) to get started with. - -```python -Given a string containing just the characters '(' and ')', return the length of the longest valid (well-formed) parentheses substring -``` -:red[**NOTE:**] On average a call for a HumanEval or Leetcode question will cost around 5-30 cents on GPT-4, using the default parameters. This value may change depending on problem difficulty and parameters. 
-""" - -chat_col.markdown(description) -sidebar = st.sidebar -# Runtime Section -runtime_container = st.container() - -# Parameters Section -sidebar.title("**An AI@UIUC Project** (https://uiuc.ai/)") -parameters_section = sidebar.expander("Parameters", expanded=False) -tree_width = parameters_section.number_input("Tree Width", min_value=1, max_value=5, value=1) -tree_depth = parameters_section.number_input("Tree Depth", min_value=1, max_value=8, value=3) -iterations = parameters_section.number_input("Iterations", min_value=1, max_value=4, value=2) -key = st.sidebar.text_input("Enter your OpenAI Api Key:", type="password") -sidebar.markdown('
', unsafe_allow_html=True) - -with sidebar: - runtime_container = st.container() - runtime_container.empty() - -runtime_messages = [] - -def make_args(instruction, tree_depth, tree_width, iterations): - parser = argparse.ArgumentParser() - - parser.add_argument("--strategy", default="mcts", help="Strategy to use") - parser.add_argument("--language", default="py", help="Programming language") - parser.add_argument("--model", default="gpt-4", help="Model type") - parser.add_argument("--max_iters", default=iterations, help="Maximum iterations") - parser.add_argument("--instruction", default=instruction, help="Instruction text") - parser.add_argument("--verbose", action="store_true", help="Verbose output") - parser.add_argument("--is_leetcode", action='store_true', - help="To run the leetcode benchmark") # Temporary - parser.add_argument("--n_samples", type=int, - help="The number of nodes added during expansion", default=tree_width) - parser.add_argument("--depth", type=int, - help="Tree depth", default=tree_depth) - args = parser.parse_args() - return args - -def run_querry(): - if user_input: - - # Create a new container for each subsequent message - runtime_container.write("Initiating process...") - - # Make it so that prints go to runtime_container writes instead - old_stdout = sys.stdout - sys.stdout = runtime_container - - with chat_col: - - with st.spinner('Running...'): - args = make_args(user_input, tree_depth, tree_width, iterations) - # main call - response = lats_main(args) - - sys.stdout = old_stdout - runtime_container.write("Response fetched.") - chat_col.markdown('
', unsafe_allow_html=True) - chat_col.write(f"```python\n{response} \n") - - return response - -# User input section at the bottom of the page -with chat_col: - user_input = st.text_area("Enter your message here:", placeholder="Type your message here...", label_visibility="collapsed") - button = st.button("Send") - - if button: - fail = False - if key == "": - st.warning("Missing OpenAI API Key") - fail = True - - if user_input == "": - st.warning("Missing a coding problem") - fail = True - - if (not fail): - openai.api_key = key - run_querry() - diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py deleted file mode 100644 index 2fc4d79f86b40c45d3f7692f32adc88295bbb4a4..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py' - -deepen_factor = 1.33 -widen_factor = 1.25 -train_batch_size_per_gpu = 32 -train_num_workers = 8 - -# TODO: need to add pretrained_model -load_from = None - -model = dict( - backbone=dict( - deepen_factor=deepen_factor, - widen_factor=widen_factor, - ), - neck=dict( - deepen_factor=deepen_factor, - widen_factor=widen_factor, - ), - bbox_head=dict(head_module=dict(widen_factor=widen_factor))) - -train_dataloader = dict( - batch_size=train_batch_size_per_gpu, num_workers=train_num_workers) - -optim_wrapper = dict( - optimizer=dict(batch_size_per_gpu=train_batch_size_per_gpu)) diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest269.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest269.py deleted file mode 100644 index c37626f5678630383693d784d2590f27caa11de2..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest269.py +++ /dev/null @@ -1,25 +0,0 @@ -# model settings -model = dict( - type='ImageClassifier', - backbone=dict( - type='ResNeSt', - depth=269, - num_stages=4, - stem_channels=128, - out_indices=(3, ), - style='pytorch'), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='LinearClsHead', - num_classes=1000, - in_channels=2048, - loss=dict( - type='LabelSmoothLoss', - label_smooth_val=0.1, - num_classes=1000, - reduction='mean', - loss_weight=1.0), - topk=(1, 5), - cal_acc=False), - train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), -) diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/HuggingChat.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/HuggingChat.py deleted file mode 100644 index 1d500338ac910d8b5d181eb75d00c9158f795194..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/HuggingChat.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -import json, uuid - -from aiohttp import ClientSession - -from ...typing import AsyncGenerator -from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies - - -class HuggingChat(AsyncGeneratorProvider): - url = "https://huggingface.co/chat" - needs_auth = True - working = True - model = "meta-llama/Llama-2-70b-chat-hf" - - @classmethod 
- async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool = True, - proxy: str = None, - cookies: dict = None, - **kwargs - ) -> AsyncGenerator: - model = model if model else cls.model - if proxy and "://" not in proxy: - proxy = f"http://{proxy}" - if not cookies: - cookies = get_cookies(".huggingface.co") - - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', - } - async with ClientSession( - cookies=cookies, - headers=headers - ) as session: - async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response: - conversation_id = (await response.json())["conversationId"] - - send = { - "id": str(uuid.uuid4()), - "inputs": format_prompt(messages), - "is_retry": False, - "response_id": str(uuid.uuid4()), - "web_search": False - } - async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response: - async for line in response.content: - line = json.loads(line[:-1]) - if "type" not in line: - raise RuntimeError(f"Response: {line}") - elif line["type"] == "stream": - yield line["token"] - elif line["type"] == "finalAnswer": - break - - async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response: - response.raise_for_status() - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/spaces/Adapter/CoAdapter/ldm/data/utils.py b/spaces/Adapter/CoAdapter/ldm/data/utils.py deleted file mode 100644 index 1c5696fefff628b31e77d98ec1f05047bb5762f5..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/ldm/data/utils.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- - -import cv2 -import numpy as np -from torchvision.transforms import transforms -from torchvision.transforms.functional import to_tensor -from transformers import CLIPProcessor - -from basicsr.utils import img2tensor - - -class AddCannyFreezeThreshold(object): - - def __init__(self, low_threshold=100, high_threshold=200): - self.low_threshold = low_threshold - self.high_threshold = high_threshold - - def __call__(self, sample): - # sample['jpg'] is PIL image - x = sample['jpg'] - img = cv2.cvtColor(np.array(x), cv2.COLOR_RGB2BGR) - canny = cv2.Canny(img, self.low_threshold, self.high_threshold)[..., None] - sample['canny'] = img2tensor(canny, bgr2rgb=True, float32=True) / 255. - sample['jpg'] = to_tensor(x) - return sample - - -class AddCannyRandomThreshold(object): - - def __init__(self, low_threshold=100, high_threshold=200, shift_range=50): - self.low_threshold = low_threshold - self.high_threshold = high_threshold - self.threshold_prng = np.random.RandomState() - self.shift_range = shift_range - - def __call__(self, sample): - # sample['jpg'] is PIL image - x = sample['jpg'] - img = cv2.cvtColor(np.array(x), cv2.COLOR_RGB2BGR) - low_threshold = self.low_threshold + self.threshold_prng.randint(-self.shift_range, self.shift_range) - high_threshold = self.high_threshold + self.threshold_prng.randint(-self.shift_range, self.shift_range) - canny = cv2.Canny(img, low_threshold, high_threshold)[..., None] - sample['canny'] = img2tensor(canny, bgr2rgb=True, float32=True) / 255. 
- sample['jpg'] = to_tensor(x) - return sample - - -class AddStyle(object): - - def __init__(self, version): - self.processor = CLIPProcessor.from_pretrained(version) - self.pil_to_tensor = transforms.ToTensor() - - def __call__(self, sample): - # sample['jpg'] is PIL image - x = sample['jpg'] - style = self.processor(images=x, return_tensors="pt")['pixel_values'][0] - sample['style'] = style - sample['jpg'] = to_tensor(x) - return sample diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/code_test.py b/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/code_test.py deleted file mode 100644 index 121aabc679b2bd53a92cdbfbe757d1b88075f123..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/tasksolving_env/rules/executor/code_test.py +++ /dev/null @@ -1,111 +0,0 @@ -from __future__ import annotations - -import os -import subprocess -import multiprocessing -from typing import TYPE_CHECKING, Any, List, Tuple - -from agentverse.logging import get_logger -from agentverse.agents import ExecutorAgent -from agentverse.message import ExecutorMessage, SolverMessage -from agentverse.logging import logger - -from . import BaseExecutor, executor_registry - - -def execute_command(command: str, result_list) -> str: - # TODO: make it more secure - result = subprocess.run(command, capture_output=True, shell=True, encoding="utf-8") - result_list.append(f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}") - # return f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" - - -@executor_registry.register("code-test") -class CodeTestExecutor(BaseExecutor): - has_test: dict = {} - timeout: int = 10 - - async def astep( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> Any: - solution = solution[0].content - os.makedirs("tmp", exist_ok=True) - self.write_to_file("tmp/main.py", solution) - manager = multiprocessing.Manager() - result = manager.list() - if task_description not in self.has_test: - response = (await agent.astep(task_description, solution)).content - self.write_to_file(response["file_path"], response["code"]) - self.has_test[task_description] = f"python {response['file_path']}" - p = multiprocessing.Process( - target=execute_command, args=(f"python {response['file_path']}", result) - ) - p.start() - p.join(timeout=self.timeout + 1) - if p.is_alive(): - p.kill() - # result = execute_command(f"python {response['file_path']}") - else: - # result = execute_command(self.has_test[task_description]) - p = multiprocessing.Process( - target=execute_command, args=(self.has_test[task_description], result) - ) - p.start() - p.join(timeout=self.timeout + 1) - if p.is_alive(): - p.kill() - if not result: - result.append("Execution timed out.") - return [ExecutorMessage(content=result[0], sender="Code Tester")] - - def step( - self, - agent: ExecutorAgent, - task_description: str, - solution: List[SolverMessage], - *args, - **kwargs, - ) -> Any: - solution = solution[0].content - os.makedirs("tmp", exist_ok=True) - self.write_to_file("tmp/main.py", solution) - manager = multiprocessing.Manager() - result = manager.list() - if task_description not in self.has_test: - response = agent.step(task_description, solution).content - self.write_to_file(response["file_path"], response["code"]) - self.has_test[task_description] = f"python {response['file_path']}" - p = multiprocessing.Process( - target=execute_command, args=(f"python 
{response['file_path']}", result) - ) - p.start() - p.join(timeout=self.timeout + 1) - if p.is_alive(): - p.kill() - # result = execute_command(f"python {response['file_path']}") - else: - # result = execute_command(self.has_test[task_description]) - p = multiprocessing.Process( - target=execute_command, args=(self.has_test[task_description], result) - ) - p.start() - p.join(timeout=self.timeout + 1) - if p.is_alive(): - p.kill() - if not result: - result.append("Execution timed out.") - return [ExecutorMessage(content=result[0], sender="Code Tester")] - - def write_to_file(self, file_name, file_content): - # TODO: generalize this method to a common tool - try: - with open(file_name, "w") as f: - f.write(file_content) - f.flush() - except: - logger.error(f"Failed to write to {file_name}") diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/ClickOutside.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/ClickOutside.js deleted file mode 100644 index 5527388e1dbf20ae4b9197790763693a0b740311..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/ClickOutside.js +++ /dev/null @@ -1,2 +0,0 @@ -import ClickOutside from '../../../plugins/clickoutside.js' -export default ClickOutside; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/SetItems.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/SetItems.js deleted file mode 100644 index 288da02b301d6f4bd0db7fa8b7933b18c35ea4c8..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/SetItems.js +++ /dev/null @@ -1,16 +0,0 @@ -var SetItems = function (items) { - if (items === undefined) { - this.items = []; - } else { - this.items = items; - } - - var table = this.childrenMap.child; - table.setCellsCount(this.items.length); - table.updateTable(true); - - this.resizeController(); - return this; -} - -export default SetItems; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/modal/Modal.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/modal/Modal.js deleted file mode 100644 index a7fc717714fd098684fd8990ad4f72e16a323cdf..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/modal/Modal.js +++ /dev/null @@ -1,2 +0,0 @@ -import { ModalBehavoir, Modal, ModalPromise, ModalClose } from '../../../plugins/modal.js'; -export { ModalBehavoir, Modal, ModalPromise, ModalClose }; \ No newline at end of file diff --git a/spaces/AhmedM20/Email_Marketing_Content_Generator/app.py b/spaces/AhmedM20/Email_Marketing_Content_Generator/app.py deleted file mode 100644 index a7f145ba3d6e53cbffcaed8b940a43931e8e6077..0000000000000000000000000000000000000000 --- a/spaces/AhmedM20/Email_Marketing_Content_Generator/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import pip -import os -SECRET_TOKEN = os.getenv("SECRET_TOKEN") - -def install(package): - if hasattr(pip, 'main'): - pip.main(['install', package]) - else: - pip._internal.main(['install', package]) - -# Example -if __name__ == '__main__': - install('cohere') -import cohere -import gradio as gr -co = cohere.Client(SECRET_TOKEN) # This is your trial API key - -def write_email(tone="",goal="",industry="",text="",audience="",other=""): - if goal=="Other": - 
goal=other - if audience=="" and industry=="": - print(f'write 5 different {tone} emails to {goal} {text}') - Message=f'write 5 different {tone} emails to {goal} {text}' - elif audience=="": - print(f'write 5 different {tone} emails to {goal} in the {industry} industry {text}') - Message=f'write 5 different {tone} emails to {goal} in the {industry} industry {text}' - elif industry=="": - print(f'write 5 different {tone} emails for {audience} to {goal} {text}') - Message=f'write 5 different {tone} emails for {audience} to {goal} {text}' - else: - print(f'write 5 different {tone} emails for {audience} to {goal} in the {industry} industry {text}') - Message=f'write 5 different {tone} emails for {audience} to {goal} in the {industry} industry {text}' - - response = co.generate( - model='command', - prompt=Message, - max_tokens=1208, - temperature=1, - k=0, - stop_sequences=[], - return_likelihoods='NONE') - return(response.generations[0].text) - - - -with gr.Blocks() as demo: - def other_field(choice): - if choice != "Other": - return gr.update(visible=False) - else: - return gr.update(visible=True) - gr.Markdown("Create your marketing emails with AI") - inp1 = gr.Radio( - ["Convince to buy a product", "Recover churned customers", "Teach a new concept","Onboard users","Share product updates","Other"], value="Convince to buy a product",label = "Campagin goal" - ) - other=gr.Textbox(visible=False,placeholder="Please enter other text",label = "Other:") - inp1.input(other_field,inp1, other) - inp2 = gr.Radio( - ["Formal", "Semi-formal", "Informal"], value="Formal",label = "Brand Tone" - ) - inp3 = gr.Textbox(placeholder="Example: marketing agency" ,label = "Industry") - inp4= gr.Textbox(placeholder="Example:Females aged between 18 and 30" ,label = "Target audience") - inp5 = gr.Textbox(placeholder="Example: I am offering 10 dollars discount for customers who cancelled their subscription and want to find a way to bring them back ", label = "Tell us more about the email you want to send") - btn = gr.Button("Generate 🚀") - out = gr.Textbox(label = "Here is your 5 Generated emails") - btn.click(fn=write_email, inputs=[inp2, inp1,inp3,inp5,inp4,other], outputs=out) - -demo.launch(debug = True) \ No newline at end of file diff --git a/spaces/AkitoP/umamusume_bert_vits2/text/japanese.py b/spaces/AkitoP/umamusume_bert_vits2/text/japanese.py deleted file mode 100644 index 7c1817ec91b6c438c7e12c32d7facafd13f04741..0000000000000000000000000000000000000000 --- a/spaces/AkitoP/umamusume_bert_vits2/text/japanese.py +++ /dev/null @@ -1,704 +0,0 @@ -# Convert Japanese text to phonemes which is -# compatible with Julius https://github.com/julius-speech/segmentation-kit -import re -import unicodedata - -from transformers import AutoTokenizer - -from text import punctuation, symbols - -try: - import MeCab -except ImportError as e: - raise ImportError("Japanese requires mecab-python3 and unidic-lite.") from e -from num2words import num2words - -_CONVRULES = [ - # Conversion of 2 letters - "アァ/ a a", - "イィ/ i i", - "イェ/ i e", - "イャ/ y a", - "ウゥ/ u:", - "エェ/ e e", - "オォ/ o:", - "カァ/ k a:", - "キィ/ k i:", - "クゥ/ k u:", - "クャ/ ky a", - "クュ/ ky u", - "クョ/ ky o", - "ケェ/ k e:", - "コォ/ k o:", - "ガァ/ g a:", - "ギィ/ g i:", - "グゥ/ g u:", - "グャ/ gy a", - "グュ/ gy u", - "グョ/ gy o", - "ゲェ/ g e:", - "ゴォ/ g o:", - "サァ/ s a:", - "シィ/ sh i:", - "スゥ/ s u:", - "スャ/ sh a", - "スュ/ sh u", - "スョ/ sh o", - "セェ/ s e:", - "ソォ/ s o:", - "ザァ/ z a:", - "ジィ/ j i:", - "ズゥ/ z u:", - "ズャ/ zy a", - "ズュ/ zy u", - "ズョ/ zy o", - "ゼェ/ z e:", - "ゾォ/ z o:", - 
"タァ/ t a:", - "チィ/ ch i:", - "ツァ/ ts a", - "ツィ/ ts i", - "ツゥ/ ts u:", - "ツャ/ ch a", - "ツュ/ ch u", - "ツョ/ ch o", - "ツェ/ ts e", - "ツォ/ ts o", - "テェ/ t e:", - "トォ/ t o:", - "ダァ/ d a:", - "ヂィ/ j i:", - "ヅゥ/ d u:", - "ヅャ/ zy a", - "ヅュ/ zy u", - "ヅョ/ zy o", - "デェ/ d e:", - "ドォ/ d o:", - "ナァ/ n a:", - "ニィ/ n i:", - "ヌゥ/ n u:", - "ヌャ/ ny a", - "ヌュ/ ny u", - "ヌョ/ ny o", - "ネェ/ n e:", - "ノォ/ n o:", - "ハァ/ h a:", - "ヒィ/ h i:", - "フゥ/ f u:", - "フャ/ hy a", - "フュ/ hy u", - "フョ/ hy o", - "ヘェ/ h e:", - "ホォ/ h o:", - "バァ/ b a:", - "ビィ/ b i:", - "ブゥ/ b u:", - "フャ/ hy a", - "ブュ/ by u", - "フョ/ hy o", - "ベェ/ b e:", - "ボォ/ b o:", - "パァ/ p a:", - "ピィ/ p i:", - "プゥ/ p u:", - "プャ/ py a", - "プュ/ py u", - "プョ/ py o", - "ペェ/ p e:", - "ポォ/ p o:", - "マァ/ m a:", - "ミィ/ m i:", - "ムゥ/ m u:", - "ムャ/ my a", - "ムュ/ my u", - "ムョ/ my o", - "メェ/ m e:", - "モォ/ m o:", - "ヤァ/ y a:", - "ユゥ/ y u:", - "ユャ/ y a:", - "ユュ/ y u:", - "ユョ/ y o:", - "ヨォ/ y o:", - "ラァ/ r a:", - "リィ/ r i:", - "ルゥ/ r u:", - "ルャ/ ry a", - "ルュ/ ry u", - "ルョ/ ry o", - "レェ/ r e:", - "ロォ/ r o:", - "ワァ/ w a:", - "ヲォ/ o:", - "ディ/ d i", - "デェ/ d e:", - "デャ/ dy a", - "デュ/ dy u", - "デョ/ dy o", - "ティ/ t i", - "テェ/ t e:", - "テャ/ ty a", - "テュ/ ty u", - "テョ/ ty o", - "スィ/ s i", - "ズァ/ z u a", - "ズィ/ z i", - "ズゥ/ z u", - "ズャ/ zy a", - "ズュ/ zy u", - "ズョ/ zy o", - "ズェ/ z e", - "ズォ/ z o", - "キャ/ ky a", - "キュ/ ky u", - "キョ/ ky o", - "シャ/ sh a", - "シュ/ sh u", - "シェ/ sh e", - "ショ/ sh o", - "チャ/ ch a", - "チュ/ ch u", - "チェ/ ch e", - "チョ/ ch o", - "トゥ/ t u", - "トャ/ ty a", - "トュ/ ty u", - "トョ/ ty o", - "ドァ/ d o a", - "ドゥ/ d u", - "ドャ/ dy a", - "ドュ/ dy u", - "ドョ/ dy o", - "ドォ/ d o:", - "ニャ/ ny a", - "ニュ/ ny u", - "ニョ/ ny o", - "ヒャ/ hy a", - "ヒュ/ hy u", - "ヒョ/ hy o", - "ミャ/ my a", - "ミュ/ my u", - "ミョ/ my o", - "リャ/ ry a", - "リュ/ ry u", - "リョ/ ry o", - "ギャ/ gy a", - "ギュ/ gy u", - "ギョ/ gy o", - "ヂェ/ j e", - "ヂャ/ j a", - "ヂュ/ j u", - "ヂョ/ j o", - "ジェ/ j e", - "ジャ/ j a", - "ジュ/ j u", - "ジョ/ j o", - "ビャ/ by a", - "ビュ/ by u", - "ビョ/ by o", - "ピャ/ py a", - "ピュ/ py u", - "ピョ/ py o", - "ウァ/ u a", - "ウィ/ w i", - "ウェ/ w e", - "ウォ/ w o", - "ファ/ f a", - "フィ/ f i", - "フゥ/ f u", - "フャ/ hy a", - "フュ/ hy u", - "フョ/ hy o", - "フェ/ f e", - "フォ/ f o", - "ヴァ/ b a", - "ヴィ/ b i", - "ヴェ/ b e", - "ヴォ/ b o", - "ヴュ/ by u", - "アー/ a:", - "イー/ i:", - "ウー/ u:", - "エー/ e:", - "オー/ o:", - "カー/ k a:", - "キー/ k i:", - "クー/ k u:", - "ケー/ k e:", - "コー/ k o:", - "サー/ s a:", - "シー/ sh i:", - "スー/ s u:", - "セー/ s e:", - "ソー/ s o:", - "ター/ t a:", - "チー/ ch i:", - "ツー/ ts u:", - "テー/ t e:", - "トー/ t o:", - "ナー/ n a:", - "ニー/ n i:", - "ヌー/ n u:", - "ネー/ n e:", - "ノー/ n o:", - "ハー/ h a:", - "ヒー/ h i:", - "フー/ f u:", - "ヘー/ h e:", - "ホー/ h o:", - "マー/ m a:", - "ミー/ m i:", - "ムー/ m u:", - "メー/ m e:", - "モー/ m o:", - "ラー/ r a:", - "リー/ r i:", - "ルー/ r u:", - "レー/ r e:", - "ロー/ r o:", - "ガー/ g a:", - "ギー/ g i:", - "グー/ g u:", - "ゲー/ g e:", - "ゴー/ g o:", - "ザー/ z a:", - "ジー/ j i:", - "ズー/ z u:", - "ゼー/ z e:", - "ゾー/ z o:", - "ダー/ d a:", - "ヂー/ j i:", - "ヅー/ z u:", - "デー/ d e:", - "ドー/ d o:", - "バー/ b a:", - "ビー/ b i:", - "ブー/ b u:", - "ベー/ b e:", - "ボー/ b o:", - "パー/ p a:", - "ピー/ p i:", - "プー/ p u:", - "ペー/ p e:", - "ポー/ p o:", - "ヤー/ y a:", - "ユー/ y u:", - "ヨー/ y o:", - "ワー/ w a:", - "ヰー/ i:", - "ヱー/ e:", - "ヲー/ o:", - "ヴー/ b u:", - # Conversion of 1 letter - "ア/ a", - "イ/ i", - "ウ/ u", - "エ/ e", - "オ/ o", - "カ/ k a", - "キ/ k i", - "ク/ k u", - "ケ/ k e", - "コ/ k o", - "サ/ s a", - "シ/ sh i", - "ス/ s u", - "セ/ s e", - "ソ/ s o", - "タ/ t a", - "チ/ ch i", - "ツ/ ts u", - "テ/ t e", - "ト/ t o", - "ナ/ n a", - "ニ/ n i", - "ヌ/ n u", - "ネ/ n e", 
- "ノ/ n o", - "ハ/ h a", - "ヒ/ h i", - "フ/ f u", - "ヘ/ h e", - "ホ/ h o", - "マ/ m a", - "ミ/ m i", - "ム/ m u", - "メ/ m e", - "モ/ m o", - "ラ/ r a", - "リ/ r i", - "ル/ r u", - "レ/ r e", - "ロ/ r o", - "ガ/ g a", - "ギ/ g i", - "グ/ g u", - "ゲ/ g e", - "ゴ/ g o", - "ザ/ z a", - "ジ/ j i", - "ズ/ z u", - "ゼ/ z e", - "ゾ/ z o", - "ダ/ d a", - "ヂ/ j i", - "ヅ/ z u", - "デ/ d e", - "ド/ d o", - "バ/ b a", - "ビ/ b i", - "ブ/ b u", - "ベ/ b e", - "ボ/ b o", - "パ/ p a", - "ピ/ p i", - "プ/ p u", - "ペ/ p e", - "ポ/ p o", - "ヤ/ y a", - "ユ/ y u", - "ヨ/ y o", - "ワ/ w a", - "ヰ/ i", - "ヱ/ e", - "ヲ/ o", - "ン/ N", - "ッ/ q", - "ヴ/ b u", - "ー/:", #这个不起作用 - # Try converting broken text - "ァ/ a", - "ィ/ i", - "ゥ/ u", - "ェ/ e", - "ォ/ o", - "ヮ/ w a", - "ォ/ o", - # Symbols - "、/ ,", - "。/ .", - "!/ !", - "?/ ?", - "・/ ,", -] - -_COLON_RX = re.compile(":+") -_REJECT_RX = re.compile("[^ a-zA-Z:,.?]") - - -def _makerulemap(): - l = [tuple(x.split("/")) for x in _CONVRULES] - return tuple({k: v for k, v in l if len(k) == i} for i in (1, 2)) - - -_RULEMAP1, _RULEMAP2 = _makerulemap() - - -def kata2phoneme(text: str) -> str: - """Convert katakana text to phonemes.""" - text = text.strip() - res = [] - while text: - if len(text) >= 2: - x = _RULEMAP2.get(text[:2]) - if x is not None: - text = text[2:] - res += x.split(" ")[1:] - continue - x = _RULEMAP1.get(text[0]) - if x is not None: - text = text[1:] - res += x.split(" ")[1:] - continue - res.append(text[0]) - text = text[1:] - # res = _COLON_RX.sub(":", res) - return res - - -_KATAKANA = "".join(chr(ch) for ch in range(ord("ァ"), ord("ン") + 1)) -_HIRAGANA = "".join(chr(ch) for ch in range(ord("ぁ"), ord("ん") + 1)) -_HIRA2KATATRANS = str.maketrans(_HIRAGANA, _KATAKANA) - - -def hira2kata(text: str) -> str: - text = text.translate(_HIRA2KATATRANS) - return text.replace("う゛", "ヴ") - - -_SYMBOL_TOKENS = set(list("・、。?!")) -_NO_YOMI_TOKENS = set(list("「」『』―()[][]")) -_TAGGER = MeCab.Tagger() - - -def text2kata(text: str) -> str: - parsed = _TAGGER.parse(text) - res = [] - for line in parsed.split("\n"): - if line == "EOS": - break - parts = line.split("\t") - - word, yomi = parts[0], parts[1] - if yomi: - res.append(yomi) - else: - if word in _SYMBOL_TOKENS: - res.append(word) - elif word in ("っ", "ッ"): - res.append("ッ") - elif word in _NO_YOMI_TOKENS: - pass - else: - res.append(word) - return hira2kata("".join(res)) - - -def text2sep_kata(text: str) -> (list, list): - parsed = _TAGGER.parse(text) - res = [] - sep = [] - for line in parsed.split("\n"): - if line == "EOS": - break - parts = line.split("\t") - - word, yomi = parts[0], parts[1] - if yomi: - res.append(yomi) - else: - if word in _SYMBOL_TOKENS: - res.append(word) - elif word in ("っ", "ッ"): - res.append("ッ") - elif word in _NO_YOMI_TOKENS: - pass - else: - res.append(word) - sep.append(word) - return sep, [hira2kata(i) for i in res] - - -_ALPHASYMBOL_YOMI = { - "#": "シャープ", - "%": "パーセント", - "&": "アンド", - "+": "プラス", - "-": "マイナス", - ":": "コロン", - ";": "セミコロン", - "<": "小なり", - "=": "イコール", - ">": "大なり", - "@": "アット", - "a": "エー", - "b": "ビー", - "c": "シー", - "d": "ディー", - "e": "イー", - "f": "エフ", - "g": "ジー", - "h": "エイチ", - "i": "アイ", - "j": "ジェー", - "k": "ケー", - "l": "エル", - "m": "エム", - "n": "エヌ", - "o": "オー", - "p": "ピー", - "q": "キュー", - "r": "アール", - "s": "エス", - "t": "ティー", - "u": "ユー", - "v": "ブイ", - "w": "ダブリュー", - "x": "エックス", - "y": "ワイ", - "z": "ゼット", - "α": "アルファ", - "β": "ベータ", - "γ": "ガンマ", - "δ": "デルタ", - "ε": "イプシロン", - "ζ": "ゼータ", - "η": "イータ", - "θ": "シータ", - "ι": "イオタ", - "κ": "カッパ", - "λ": "ラムダ", - "μ": "ミュー", - "ν": 
"ニュー", - "ξ": "クサイ", - "ο": "オミクロン", - "π": "パイ", - "ρ": "ロー", - "σ": "シグマ", - "τ": "タウ", - "υ": "ウプシロン", - "φ": "ファイ", - "χ": "カイ", - "ψ": "プサイ", - "ω": "オメガ", -} - - -_NUMBER_WITH_SEPARATOR_RX = re.compile("[0-9]{1,3}(,[0-9]{3})+") -_CURRENCY_MAP = {"$": "ドル", "¥": "円", "£": "ポンド", "€": "ユーロ"} -_CURRENCY_RX = re.compile(r"([$¥£€])([0-9.]*[0-9])") -_NUMBER_RX = re.compile(r"[0-9]+(\.[0-9]+)?") - - -def japanese_convert_numbers_to_words(text: str) -> str: - res = _NUMBER_WITH_SEPARATOR_RX.sub(lambda m: m[0].replace(",", ""), text) - res = _CURRENCY_RX.sub(lambda m: m[2] + _CURRENCY_MAP.get(m[1], m[1]), res) - res = _NUMBER_RX.sub(lambda m: num2words(m[0], lang="ja"), res) - return res - - -def japanese_convert_alpha_symbols_to_words(text: str) -> str: - return "".join([_ALPHASYMBOL_YOMI.get(ch, ch) for ch in text.lower()]) - - -def japanese_text_to_phonemes(text: str) -> str: - """Convert Japanese text to phonemes.""" - res = unicodedata.normalize("NFKC", text) - res = japanese_convert_numbers_to_words(res) - # res = japanese_convert_alpha_symbols_to_words(res) - res = text2kata(res) - res = kata2phoneme(res) - return res - - -def is_japanese_character(char): - # 定义日语文字系统的 Unicode 范围 - japanese_ranges = [ - (0x3040, 0x309F), # 平假名 - (0x30A0, 0x30FF), # 片假名 - (0x4E00, 0x9FFF), # 汉字 (CJK Unified Ideographs) - (0x3400, 0x4DBF), # 汉字扩展 A - (0x20000, 0x2A6DF), # 汉字扩展 B - # 可以根据需要添加其他汉字扩展范围 - ] - - # 将字符的 Unicode 编码转换为整数 - char_code = ord(char) - - # 检查字符是否在任何一个日语范围内 - for start, end in japanese_ranges: - if start <= char_code <= end: - return True - - return False - - -rep_map = { - ":": ",", - ";": ",", - ",": ",", - "。": ".", - "!": "!", - "?": "?", - "\n": ".", - "·": ",", - "、": ",", - "…": "...", -} - - -def replace_punctuation(text): - pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub( - r"[^\u3040-\u309F\u30A0-\u30FF\u4E00-\u9FFF\u3400-\u4DBF" - + "".join(punctuation) - + r"]+", - "", - replaced_text, - ) - - return replaced_text - - -def text_normalize(text): - res = unicodedata.normalize("NFKC", text) - res = japanese_convert_numbers_to_words(res) - # res = "".join([i for i in res if is_japanese_character(i)]) - res = replace_punctuation(res) - return res - - -def distribute_phone(n_phone, n_word): - phones_per_word = [0] * n_word - for task in range(n_phone): - min_tasks = min(phones_per_word) - min_index = phones_per_word.index(min_tasks) - phones_per_word[min_index] += 1 - return phones_per_word - - -tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3") - - -def g2p(norm_text): - sep_text, sep_kata = text2sep_kata(norm_text) - sep_tokenized = [tokenizer.tokenize(i) for i in sep_text] - sep_phonemes = [kata2phoneme(i) for i in sep_kata] - # 异常处理,MeCab不认识的词的话会一路传到这里来,然后炸掉。目前来看只有那些超级稀有的生僻词会出现这种情况 - for i in sep_phonemes: - for j in i: - assert j in symbols, (sep_text, sep_kata, sep_phonemes) - - word2ph = [] - for token, phoneme in zip(sep_tokenized, sep_phonemes): - phone_len = len(phoneme) - word_len = len(token) - - aaa = distribute_phone(phone_len, word_len) - word2ph += aaa - phones = ["_"] + [j for i in sep_phonemes for j in i] + ["_"] - tones = [0 for i in phones] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - -if __name__ == "__main__": - tokenizer = AutoTokenizer.from_pretrained("./bert/bert-base-japanese-v3") - text = "だったら私、スズカさんと同じチームに入りたいです! スズカさんの走りを毎日近くで、なんなら真横から見ていたいので!" 
- #print(_TAGGER.parse(text)) - # nodes = [{"surface": "こんにちは", "pos": "感動詞:*:*:*", "pron": "コンニチワ", "c_type": "*", "c_form": "*", "accent_type": 0, "accent_con_type": "-1", "chain_flag": -1}] - nodes = [{"surface":"こんにちは","pron": "コンニチワ","pos": "感動詞:*:*:*",}] - from text.japanese_bert import get_bert_feature - import pyopenjtalk - from marine.predict import Predictor - from marine.utils.openjtalk_util import convert_njd_feature_to_marine_feature - text = text_normalize(text) - NJD_NODES = pyopenjtalk.run_frontend(text) - predictor = Predictor() - # important_info = [{"string":i["string"],"pron":i["pron"],"acc":i["acc"]}for i in pyopenjtalk.estimate_accent(NJD_NODES)] - print(text) - - marine_feature = convert_njd_feature_to_marine_feature(NJD_NODES) - results = predictor.predict([marine_feature]) - for mora,acc in zip(results["mora"][0],results["accent_status"][0]): - print(f"{mora}:{acc}") - # for i in pyopenjtalk.estimate_accent(NJD_NODES): - # print(f"{i['string']}:{i['pron']}:{i['acc']}") -# info = pyopenjtalk.extract_fullcontext(text,run_marine=True) -# info_nomarine = pyopenjtalk.extract_fullcontext(text,run_marine=False) -# # nodes = pyopenjtalk -# # print(info) -# for i,j in zip(info,info_nomarine): -# print(i) -# print(j) -# print("\n") - # predictor = Predictor() - #print(pyopenjtalk.estimate_accent(text)) - # output = predictor.predict([nodes],accent_represent_mode="high_low") - #print(output) - # phones, tones, word2ph = g2p(text) - # bert = get_bert_feature(text, word2ph) - - # print(phones, tones, word2ph, bert.shape) diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/uroman/README.md b/spaces/Akmyradov/TurkmenTTSweSTT/uroman/README.md deleted file mode 100644 index 6a0a40f6d4ebda9041d23efe0345340b7da9d4b8..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/TurkmenTTSweSTT/uroman/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# uroman - -*uroman* is a *universal romanizer*. It converts text in any script to the Latin alphabet. - -Version: 1.2.8 -Release date: April 23, 2021 -Author: Ulf Hermjakob, USC Information Sciences Institute - - -### Usage -```bash -$ uroman.pl [-l ] [--chart] [--no-cache] < STDIN - where the optional is a 3-letter languages code, e.g. ara, bel, bul, deu, ell, eng, fas, - grc, ell, eng, heb, kaz, kir, lav, lit, mkd, mkd2, oss, pnt, pus, rus, srp, srp2, tur, uig, ukr, yid. - --chart specifies chart output (in JSON format) to represent alternative romanizations. - --no-cache disables caching. -``` -### Examples -```bash -$ bin/uroman.pl < text/zho.txt -$ bin/uroman.pl -l tur < text/tur.txt -$ bin/uroman.pl -l heb --chart < text/heb.txt -$ bin/uroman.pl < test/multi-script.txt > test/multi-script.uroman.txt -``` - -Identifying the input as Arabic, Belarusian, Bulgarian, English, Farsi, German, -Ancient Greek, Modern Greek, Pontic Greek, Hebrew, Kazakh, Kyrgyz, Latvian, -Lithuanian, North Macedonian, Russian, Serbian, Turkish, Ukrainian, Uyghur or -Yiddish will improve romanization for those languages as some letters in those -languages have different sound values from other languages using the same script -(French, Russian, Hebrew respectively). -No effect for other languages in this version. - -### Bibliography -Ulf Hermjakob, Jonathan May, and Kevin Knight. 2018. Out-of-the-box universal romanization tool uroman. In Proceedings of the 56th Annual Meeting of Association for Computational Linguistics, Demo Track. ACL-2018 Best Demo Paper Award. 
[Paper in ACL Anthology](https://www.aclweb.org/anthology/P18-4003) | [Poster](https://www.isi.edu/~ulf/papers/poster-uroman-acl2018.pdf) | [BibTex](https://www.aclweb.org/anthology/P18-4003.bib) - -### Change History -Changes in version 1.2.8 - * Updated to Unicode 13.0 (2021), which supports several new scripts (10% larger UnicodeData.txt). - * Improved support for Georgian. - * Preserve various symbols (as opposed to mapping to the symbols' names). - * Various small improvements. - -Changes in version 1.2.7 - * Improved support for Pashto. - -Changes in version 1.2.6 - * Improved support for Ukrainian, Russian and Ogham (ancient Irish script). - * Added support for English Braille. - * Added alternative Romanization for North Macedonian and Serbian (mkd2/srp2) - reflecting a casual style that many native speakers of those languages use - when writing text in Latin script, e.g. non-accented single letters (e.g. "s") - rather than phonetically motivated combinations of letters (e.g. "sh"). - * When a line starts with "::lcode xyz ", the new uroman version will switch to - that language for that line. This is used for the new reference test file. - * Various small improvements. - -Changes in version 1.2.5 - * Improved support for Armenian and eight languages using Cyrillic scripts. - -- For Serbian and Macedonian, which are often written in both Cyrillic - and Latin scripts, uroman will map both official versions to the same - romanized text, e.g. both "Ниш" and "Niš" will be mapped to "Nish" (which - properly reflects the pronunciation of the city's name). - For both Serbian and Macedonian, casual writers often use a simplified - Latin form without diacritics, e.g. "s" to represent not only Cyrillic "с" - and Latin "s", but also "ш" or "š", even if this conflates "s" and "sh" and - other such pairs. The casual romanization can be simulated by using - alternative uroman language codes "srp2" and "mkd2", which romanize - both "Ниш" and "Niš" to "Nis" to reflect the casual Latin spelling. - * Various small improvements. - -Changes in version 1.2.4 - * Bug-fix that generated two emtpy lines for each empty line in cache mode. - -Changes in version 1.2 - * Run-time improvement based on (1) token-based caching and (2) shortcut - romanization (identity) of ASCII strings for default 1-best (non-chart) - output. Speed-up by a factor of 10 for Bengali and Uyghur on medium and - large size texts. - * Incremental improvements for Farsi, Amharic, Russian, Hebrew and related - languages. - * Richer lattice structure (more alternatives) for "Romanization" of English - to support better matching to romanizations of other languages. - Changes output only when --chart option is specified. No change in output for - default 1-best output, which for ASCII characters is always the input string. - -Changes in version 1.1 (major upgrade) - * Offers chart output (in JSON format) to represent alternative romanizations. - -- Location of first character is defined to be "line: 1, start:0, end:0". - * Incremental improvements of Hebrew and Greek romanization; Chinese numbers. - * Improved web-interface at http://www.isi.edu/~ulf/uroman.html - -- Shows corresponding original and romanization text in red - when hovering over a text segment. - -- Shows alternative romanizations when hovering over romanized text - marked by dotted underline. - -- Added right-to-left script detection and improved display for right-to-left - script text (as determined line by line). 
- -- On-page support for some scripts that are often not pre-installed on users' - computers (Burmese, Egyptian, Klingon). - -Changes in version 1.0 (major upgrade) - * Upgraded principal internal data structure from string to lattice. - * Improvements mostly in vowelization of South and Southeast Asian languages. - * Vocalic 'r' more consistently treated as vowel (no additional vowel added). - * Repetition signs (Japanese/Chinese/Thai/Khmer/Lao) are mapped to superscript 2. - * Japanese Katakana middle dots now mapped to ASCII space. - * Tibetan intersyllabic mark now mapped to middle dot (U+00B7). - * Some corrections regarding analysis of Chinese numbers. - * Many more foreign diacritics and punctuation marks dropped or mapped to ASCII. - * Zero-width characters dropped, except line/sentence-initial byte order marks. - * Spaces normalized to ASCII space. - * Fixed bug that in some cases mapped signs (such as dagger or bullet) to their verbal descriptions. - * Tested against previous version of uroman with a new uroman visual diff tool. - * Almost an order of magnitude faster. - -Changes in version 0.7 (minor upgrade) - * Added script uroman-quick.pl for Arabic script languages, incl. Uyghur. - Much faster, pre-caching mapping of Arabic to Latin characters, simple greedy processing. - Will not convert material from non-Arabic blocks such as any (somewhat unusual) Cyrillic - or Chinese characters in Uyghur texts. - -Changes in version 0.6 (minor upgrade) - * Added support for two letter characters used in Uzbek: - (1) character "ʻ" ("modifier letter turned comma", which modifies preceding "g" and "u" letters) - (2) character "ʼ" ("modifier letter apostrophe", which Uzbek uses to mark a glottal stop). - Both are now mapped to "'" (plain ASCII apostrophe). - * Added support for Uyghur vowel characters such as "ې" (Arabic e) and "ۆ" (Arabic oe) - even when they are not preceded by "ئ" (yeh with hamza above). - * Added support for Arabic semicolon "؛", Arabic ligature forms for phrases such as "ﷺ" - ("sallallahou alayhe wasallam" = "prayer of God be upon him and his family and peace") - * Added robustness for Arabic letter presentation forms (initial/medial/final/isolated). - However, it is strongly recommended to normalize any presentation form Arabic letters - to their non-presentation form before calling uroman. - * Added force flush directive ($|=1;). - -Changes in version 0.5 (minor upgrade) - * Improvements for Uyghur (make sure to use language option: -l uig) - -Changes in version 0.4 (minor upgrade) - * Improvements for Thai (special cases for vowel/consonant reordering, e.g. for "sara o"; dropped some aspiration 'h's) - * Minor change for Arabic (added "alef+fathatan" = "an") - -New features in version 0.3 - * Covers Mandarin (Chinese) - * Improved romanization for numerous languages - * Preserves capitalization (e.g. from Latin, Cyrillic, Greek scripts) - * Maps from native digits to Western numbers - * Faster for South Asian languages - -### Other features - * Web interface: http://www.isi.edu/~ulf/uroman.html - * Vowelization is provided when locally computable, e.g. for many South Asian languages and Tibetan. - -### Limitations - * The current version of uroman has a few limitations, some of which we plan to address in future versions. - For Japanese, *uroman* currently romanizes hiragana and katakana as expected, but kanji are interpreted as Chinese characters and romanized as such. 
- For Egyptian hieroglyphs, only single-sound phonetic characters and numbers are currently romanized. - For Linear B, only phonetic syllabic characters are romanized. - For some other extinct scripts such as cuneiform, no romanization is provided. - * A romanizer is not a full transliterator. For example, this version of - uroman does not vowelize text that lacks explicit vowelization such as - normal text in Arabic and Hebrew (without diacritics/points). - -### Acknowledgments -This research is based upon work supported in part by the Office of the Director of National Intelligence (ODNI), Intelligence Advanced Research Projects Activity (IARPA), via contract # FA8650-17-C-9116, and by research sponsored by Air Force Research Laboratory (AFRL) under agreement number FA8750-19-1-1000. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies, either expressed or implied, of ODNI, IARPA, Air Force Laboratory, DARPA, or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for governmental purposes notwithstanding any copyright annotation therein. diff --git a/spaces/AlanMars/QYL-AI-Space/modules/models/base_model.py b/spaces/AlanMars/QYL-AI-Space/modules/models/base_model.py deleted file mode 100644 index 01cee5b82d61b2a0369f6c8cd77553074776103a..0000000000000000000000000000000000000000 --- a/spaces/AlanMars/QYL-AI-Space/modules/models/base_model.py +++ /dev/null @@ -1,592 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import traceback -import pathlib - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from enum import Enum - -from ..presets import * -from ..llama_func import * -from ..utils import * -from .. 
import shared -from ..config import retrieve_proxy - - -class ModelType(Enum): - Unknown = -1 - OpenAI = 0 - ChatGLM = 1 - LLaMA = 2 - XMChat = 3 - StableLM = 4 - MOSS = 5 - YuanAI = 6 - - @classmethod - def get_type(cls, model_name: str): - model_type = None - model_name_lower = model_name.lower() - if "gpt" in model_name_lower: - model_type = ModelType.OpenAI - elif "chatglm" in model_name_lower: - model_type = ModelType.ChatGLM - elif "llama" in model_name_lower or "alpaca" in model_name_lower: - model_type = ModelType.LLaMA - elif "xmchat" in model_name_lower: - model_type = ModelType.XMChat - elif "stablelm" in model_name_lower: - model_type = ModelType.StableLM - elif "moss" in model_name_lower: - model_type = ModelType.MOSS - elif "yuanai" in model_name_lower: - model_type = ModelType.YuanAI - else: - model_type = ModelType.Unknown - return model_type - - -class BaseLLMModel: - def __init__( - self, - model_name, - system_prompt="", - temperature=1.0, - top_p=1.0, - n_choices=1, - stop=None, - max_generation_token=None, - presence_penalty=0, - frequency_penalty=0, - logit_bias=None, - user="", - ) -> None: - self.history = [] - self.all_token_counts = [] - self.model_name = model_name - self.model_type = ModelType.get_type(model_name) - try: - self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name] - except KeyError: - self.token_upper_limit = DEFAULT_TOKEN_LIMIT - self.interrupted = False - self.system_prompt = system_prompt - self.api_key = None - self.need_api_key = False - self.single_turn = False - - self.temperature = temperature - self.top_p = top_p - self.n_choices = n_choices - self.stop_sequence = stop - self.max_generation_token = None - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.logit_bias = logit_bias - self.user_identifier = user - - def get_answer_stream_iter(self): - """stream predict, need to be implemented - conversations are stored in self.history, with the most recent question, in OpenAI format - should return a generator, each time give the next word (str) in the answer - """ - logging.warning("stream predict not implemented, using at once predict instead") - response, _ = self.get_answer_at_once() - yield response - - def get_answer_at_once(self): - """predict at once, need to be implemented - conversations are stored in self.history, with the most recent question, in OpenAI format - Should return: - the answer (str) - total token count (int) - """ - logging.warning("at once predict not implemented, using stream predict instead") - response_iter = self.get_answer_stream_iter() - count = 0 - for response in response_iter: - count += 1 - return response, sum(self.all_token_counts) + count - - def billing_info(self): - """get billing infomation, inplement if needed""" - logging.warning("billing info not implemented, using default") - return BILLING_NOT_APPLICABLE_MSG - - def count_token(self, user_input): - """get token count from input, implement if needed""" - # logging.warning("token count not implemented, using default") - return len(user_input) - - def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""): - def get_return_value(): - return chatbot, status_text - - status_text = i18n("开始实时传输回答……") - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - - user_token_count = self.count_token(inputs) - self.all_token_counts.append(user_token_count) - logging.debug(f"输入token计数: {user_token_count}") - - stream_iter = self.get_answer_stream_iter() - - for 
partial_text in stream_iter: - chatbot[-1] = (chatbot[-1][0], partial_text + display_append) - self.all_token_counts[-1] += 1 - status_text = self.token_message() - yield get_return_value() - if self.interrupted: - self.recover() - break - self.history.append(construct_assistant(partial_text)) - - def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""): - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - if fake_input is not None: - user_token_count = self.count_token(fake_input) - else: - user_token_count = self.count_token(inputs) - self.all_token_counts.append(user_token_count) - ai_reply, total_token_count = self.get_answer_at_once() - self.history.append(construct_assistant(ai_reply)) - if fake_input is not None: - self.history[-2] = construct_user(fake_input) - chatbot[-1] = (chatbot[-1][0], ai_reply + display_append) - if fake_input is not None: - self.all_token_counts[-1] += count_token(construct_assistant(ai_reply)) - else: - self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts) - status_text = self.token_message() - return chatbot, status_text - - def handle_file_upload(self, files, chatbot): - """if the model accepts multi modal input, implement this function""" - status = gr.Markdown.update() - if files: - construct_index(self.api_key, file_src=files) - status = "索引构建完成" - return gr.Files.update(), chatbot, status - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = None - display_append = [] - limited_context = False - fake_inputs = real_inputs - if files: - from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery - from llama_index.indices.query.schema import QueryBundle - from langchain.embeddings.huggingface import HuggingFaceEmbeddings - from langchain.chat_models import ChatOpenAI - from llama_index import ( - GPTSimpleVectorIndex, - ServiceContext, - LangchainEmbedding, - OpenAIEmbedding, - ) - limited_context = True - msg = "加载索引中……" - logging.info(msg) - # yield chatbot + [(inputs, "")], msg - index = construct_index(self.api_key, file_src=files) - assert index is not None, "获取索引失败" - msg = "索引获取成功,生成回答中……" - logging.info(msg) - if local_embedding or self.model_type != ModelType.OpenAI: - embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name = "sentence-transformers/distiluse-base-multilingual-cased-v2")) - else: - embed_model = OpenAIEmbedding() - # yield chatbot + [(inputs, "")], msg - with retrieve_proxy(): - prompt_helper = PromptHelper( - max_input_size=4096, - num_output=5, - max_chunk_overlap=20, - chunk_size_limit=600, - ) - from llama_index import ServiceContext - - service_context = ServiceContext.from_defaults( - prompt_helper=prompt_helper, embed_model=embed_model - ) - query_object = GPTVectorStoreIndexQuery( - index.index_struct, - service_context=service_context, - similarity_top_k=5, - vector_store=index._vector_store, - docstore=index._docstore, - response_synthesizer=None - ) - query_bundle = QueryBundle(real_inputs) - nodes = query_object.retrieve(query_bundle) - reference_results = [n.node.text for n in nodes] - reference_results = add_source_numbers(reference_results, use_source=False) - display_append = add_details(reference_results) - display_append = "\n\n" + "".join(display_append) - real_inputs = ( - replace_today(PROMPT_TEMPLATE) - .replace("{query_str}", real_inputs) - .replace("{context_str}", "\n\n".join(reference_results)) - .replace("{reply_language}", 
reply_language) - ) - elif use_websearch: - limited_context = True - search_results = ddg(real_inputs, max_results=5) - reference_results = [] - for idx, result in enumerate(search_results): - logging.debug(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - reference_results.append([result["body"], result["href"]]) - display_append.append( - # f"{idx+1}. [{domain_name}]({result['href']})\n" - f"
• {domain_name}\n" - ) - reference_results = add_source_numbers(reference_results) - display_append = "\n\n" + "".join(display_append) + "
    " - real_inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", real_inputs) - .replace("{web_results}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language) - ) - else: - display_append = "" - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def predict( - self, - inputs, - chatbot, - stream=False, - use_websearch=False, - files=None, - reply_language="中文", - should_check_token_count=True, - ): # repetition_penalty, top_k - - status_text = "开始生成回答……" - logging.info( - "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL - ) - if should_check_token_count: - yield chatbot + [(inputs, "")], status_text - if reply_language == "跟随问题语言(不稳定)": - reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch." - - limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot) - yield chatbot + [(fake_inputs, "")], status_text - - if ( - self.need_api_key and - self.api_key is None - and not shared.state.multi_api_key - ): - status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG - logging.info(status_text) - chatbot.append((inputs, "")) - if len(self.history) == 0: - self.history.append(construct_user(inputs)) - self.history.append("") - self.all_token_counts.append(0) - else: - self.history[-2] = construct_user(inputs) - yield chatbot + [(inputs, "")], status_text - return - elif len(inputs.strip()) == 0: - status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG - logging.info(status_text) - yield chatbot + [(inputs, "")], status_text - return - - if self.single_turn: - self.history = [] - self.all_token_counts = [] - self.history.append(construct_user(inputs)) - - try: - if stream: - logging.debug("使用流式传输") - iter = self.stream_next_chatbot( - inputs, - chatbot, - fake_input=fake_inputs, - display_append=display_append, - ) - for chatbot, status_text in iter: - yield chatbot, status_text - else: - logging.debug("不使用流式传输") - chatbot, status_text = self.next_chatbot_at_once( - inputs, - chatbot, - fake_input=fake_inputs, - display_append=display_append, - ) - yield chatbot, status_text - except Exception as e: - traceback.print_exc() - status_text = STANDARD_ERROR_MSG + str(e) - yield chatbot, status_text - - if len(self.history) > 1 and self.history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{self.history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if limited_context: - # self.history = self.history[-4:] - # self.all_token_counts = self.all_token_counts[-2:] - self.history = [] - self.all_token_counts = [] - - max_token = self.token_upper_limit - TOKEN_OFFSET - - if sum(self.all_token_counts) > max_token and should_check_token_count: - count = 0 - while ( - sum(self.all_token_counts) - > self.token_upper_limit * REDUCE_TOKEN_FACTOR - and sum(self.all_token_counts) > 0 - ): - count += 1 - del self.all_token_counts[0] - del self.history[:2] - logging.info(status_text) - status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话" - yield chatbot, status_text - - self.auto_save(chatbot) - - def retry( - self, - chatbot, - stream=False, - use_websearch=False, - files=None, - reply_language="中文", - ): - logging.debug("重试中……") - if len(self.history) > 0: - inputs = self.history[-2]["content"] - del self.history[-2:] - self.all_token_counts.pop() - elif len(chatbot) > 0: - inputs = chatbot[-1][0] - else: - yield 
chatbot, f"{STANDARD_ERROR_MSG}上下文是空的" - return - - iter = self.predict( - inputs, - chatbot, - stream=stream, - use_websearch=use_websearch, - files=files, - reply_language=reply_language, - ) - for x in iter: - yield x - logging.debug("重试完毕") - - # def reduce_token_size(self, chatbot): - # logging.info("开始减少token数量……") - # chatbot, status_text = self.next_chatbot_at_once( - # summarize_prompt, - # chatbot - # ) - # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR - # num_chat = find_n(self.all_token_counts, max_token_count) - # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats") - # chatbot = chatbot[:-1] - # self.history = self.history[-2*num_chat:] if num_chat > 0 else [] - # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else [] - # msg = f"保留了最近{num_chat}轮对话" - # logging.info(msg) - # logging.info("减少token数量完毕") - # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0]) - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_token_upper_limit(self, new_upper_limit): - self.token_upper_limit = new_upper_limit - print(f"token上限设置为{new_upper_limit}") - - def set_temperature(self, new_temperature): - self.temperature = new_temperature - - def set_top_p(self, new_top_p): - self.top_p = new_top_p - - def set_n_choices(self, new_n_choices): - self.n_choices = new_n_choices - - def set_stop_sequence(self, new_stop_sequence: str): - new_stop_sequence = new_stop_sequence.split(",") - self.stop_sequence = new_stop_sequence - - def set_max_tokens(self, new_max_tokens): - self.max_generation_token = new_max_tokens - - def set_presence_penalty(self, new_presence_penalty): - self.presence_penalty = new_presence_penalty - - def set_frequency_penalty(self, new_frequency_penalty): - self.frequency_penalty = new_frequency_penalty - - def set_logit_bias(self, logit_bias): - logit_bias = logit_bias.split() - bias_map = {} - encoding = tiktoken.get_encoding("cl100k_base") - for line in logit_bias: - word, bias_amount = line.split(":") - if word: - for token in encoding.encode(word): - bias_map[token] = float(bias_amount) - self.logit_bias = bias_map - - def set_user_identifier(self, new_user_identifier): - self.user_identifier = new_user_identifier - - def set_system_prompt(self, new_system_prompt): - self.system_prompt = new_system_prompt - - def set_key(self, new_access_key): - self.api_key = new_access_key.strip() - msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key) - logging.info(msg) - return self.api_key, msg - - def set_single_turn(self, new_single_turn): - self.single_turn = new_single_turn - - def reset(self): - self.history = [] - self.all_token_counts = [] - self.interrupted = False - pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(os.path.join(HISTORY_DIR, self.user_identifier)))).touch() - return [], self.token_message([0]) - - def delete_first_conversation(self): - if self.history: - del self.history[:2] - del self.all_token_counts[0] - return self.token_message() - - def delete_last_conversation(self, chatbot): - if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]: - msg = "由于包含报错信息,只删除chatbot记录" - chatbot.pop() - return chatbot, self.history - if len(self.history) > 0: - self.history.pop() - self.history.pop() - if len(chatbot) > 0: - msg = "删除了一组chatbot对话" - chatbot.pop() - if len(self.all_token_counts) > 0: - msg = "删除了一组对话的token计数记录" - 
self.all_token_counts.pop() - msg = "删除了一组对话" - return chatbot, msg - - def token_message(self, token_lst=None): - if token_lst is None: - token_lst = self.all_token_counts - token_sum = 0 - for i in range(len(token_lst)): - token_sum += sum(token_lst[: i + 1]) - return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens" - - def save_chat_history(self, filename, chatbot, user_name): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, self.system_prompt, self.history, chatbot, user_name) - - def auto_save(self, chatbot): - history_file_path = get_history_filepath(self.user_identifier) - save_file(history_file_path, self.system_prompt, self.history, chatbot, self.user_identifier) - - def export_markdown(self, filename, chatbot, user_name): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, self.system_prompt, self.history, chatbot, user_name) - - def load_chat_history(self, filename, user_name): - logging.debug(f"{user_name} 加载对话历史中……") - logging.info(f"filename: {filename}") - if type(filename) != str and filename is not None: - filename = filename.name - try: - if "/" not in filename: - history_file_path = os.path.join(HISTORY_DIR, user_name, filename) - else: - history_file_path = filename - with open(history_file_path, "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - pass - logging.debug(f"{user_name} 加载对话历史完毕") - self.history = json_s["history"] - return os.path.basename(filename), json_s["system"], json_s["chatbot"] - except: - # 没有对话历史或者对话历史解析失败 - logging.info(f"没有找到对话历史记录 {filename}") - return gr.update(), self.system_prompt, gr.update() - - def auto_load(self): - if self.user_identifier == "": - self.reset() - return self.system_prompt, gr.update() - history_file_path = get_history_filepath(self.user_identifier) - filename, system_prompt, chatbot = self.load_chat_history(history_file_path, self.user_identifier) - return system_prompt, chatbot - - def like(self): - """like the last response, implement if needed - """ - return gr.update() - - def dislike(self): - """dislike the last response, implement if needed - """ - return gr.update() diff --git a/spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/test_eval_wer.py b/spaces/AlhitawiMohammed22/CER_Hu-Evaluation-Metrics/test_eval_wer.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index b371ed757bf7dd95ef9ecfc2e609ca5ab03795d6..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,38 +0,0 @@ -_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py'] - -model = dict( - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict( - norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe')) - -img_norm_cfg 
= dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnext.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnext.py deleted file mode 100644 index 6dbcbd516fd308b1d703eecb83ab275f6b159516..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/resnext.py +++ /dev/null @@ -1,153 +0,0 @@ -import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. 
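        For example, with the standard ResNeXt 32x4d setting (groups=32,
        base_width=4, base_channels=64) and planes=64 in the first stage, the
        grouped 3x3 conv runs at width = floor(64 * 4 / 64) * 32 = 128 channels.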
- """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - if self.with_plugins: - self._del_block_plugins(self.after_conv1_plugin_names + - self.after_conv2_plugin_names + - self.after_conv3_plugin_names) - self.after_conv1_plugin_names = self.make_block_plugins( - width, self.after_conv1_plugins) - self.after_conv2_plugin_names = self.make_block_plugins( - width, self.after_conv2_plugins) - self.after_conv3_plugin_names = self.make_block_plugins( - self.planes * self.expansion, self.after_conv3_plugins) - - def _del_block_plugins(self, plugin_names): - """delete plugins for block if exist. - - Args: - plugin_names (list[str]): List of plugins name to delete. - """ - assert isinstance(plugin_names, list) - for plugin_name in plugin_names: - del self._modules[plugin_name] - - -@BACKBONES.register_module() -class ResNeXt(ResNet): - """ResNeXt backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Resnet stages. Default: 4. - groups (int): Group of resnext. - base_width (int): Base width of resnext. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. 
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``""" - return ResLayer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/spaces/AndySAnker/DeepStruc/app.py b/spaces/AndySAnker/DeepStruc/app.py deleted file mode 100644 index c01386499d591dd65858979f493b12d7e97335c1..0000000000000000000000000000000000000000 --- a/spaces/AndySAnker/DeepStruc/app.py +++ /dev/null @@ -1,94 +0,0 @@ -import streamlit as st -import io, os, argparse, torch, random -import pytorch_lightning as pl -import numpy as np -from predict import main -from tools.utils import plot_ls - -seed = 37 -torch.manual_seed(seed) -pl.seed_everything(seed) -torch.manual_seed(seed) -np.random.seed(seed) -random.seed(seed) - -st.title('DeepStruc') - -st.write('Welcome to DeepStruc that is a Deep Generative Model which has been trained to solve a mono-metallic structure (<200 atoms) based on a PDF!') -st.write('Upload a PDF to use DeepStruc to predict the structure.') - - -# Define the file upload widget -pdf_file = st.file_uploader("Upload PDF file in .gr format", type=["gr"]) - -# Define the form to get the other parameters -num_structures = st.number_input("Number of structures to generate", min_value=1, max_value=100, value=10) -structure_index = st.number_input("Index of structure to visualize", min_value=0, value=3) -sigma = st.number_input("Standard deviation for sampling", min_value=0.1, value=3.0) - -# Define parser -parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) -args = parser.parse_args() -args.num_samples = num_structures -args.index_plot = structure_index -args.sigma = sigma -# Fixed for DeepStruc app -args.model = 'DeepStruc' -args.save_path = './' - -# Define the predict button and its behavior -if st.button("Generate structures"): - if pdf_file is None: - st.warning("Please upload a PDF file.") - else: - # Get the contents of the file as bytes - file_bytes = pdf_file.read() - - # Save the contents of the file to disk - with open("uploaded_file.gr", "wb") as f: - f.write(file_bytes) - - df, index_highlight, these_cords = main(args) - - # Plot the latent space - fig = plot_ls(df, index_highlight) - st.pyplot(fig) - st.write('**The two-dimensional latent space with location of the input.** The size of the points relates to the size of the embedded structure. Each point is coloured after its structure type, FCC (light blue), octahedral (dark grey), decahedral (orange), BCC (green), icosahedral (dark blue), HCP (pink) and SC (red). Each point in the latent space corresponds to a structure based on its simulated PDF. Test data point are plotted on top of the training and validation data, which is made semi-transparent. 
The latent space locations of the reconstructed structures from the input are shown with black markers and the specific reconstructed structure that is shown in the next box is shown with a black and white marker.') - - # Define the save directory and file name - file_name = "DeepStruc_prediction.xyz" - - # Define a download button to download the file - def download_button(file_name, button_text): - with open(file_name, "rb") as f: - bytes = f.read() - st.download_button( - label=button_text, - data=bytes, - file_name=file_name, - mime="text/xyz",) - - # Save the coordinates to a file and display a download button - np.savetxt(file_name, these_cords, fmt="%s") - download_button(file_name, "Download XYZ file") - - - -st.subheader('Cite') - -st.write('If you use DeepStruc, our code or results, please consider citing our papers. Thanks in advance!') - -st.write('DeepStruc: Towards structure solution from pair distribution function data using deep generative models **2023** (https://pubs.rsc.org/en/content/articlehtml/2022/dd/d2dd00086e)') -st.write('Characterising the atomic structure of mono-metallic nanoparticles from x-ray scattering data using conditional generative models **2020** (https://chemrxiv.org/engage/chemrxiv/article-details/60c74dd1842e6514f2db3527)') - -st.subheader('LICENSE') - -st.write('This project is licensed under the Apache License Version 2.0, January 2004 - see the LICENSE file at https://github.com/EmilSkaaning/DeepStruc/blob/main/LICENSE.md for details.') -st.write("") - -st.subheader('Github') -st.write('https://github.com/EmilSkaaning/DeepStruc') - -st.subheader('Questions') -st.write('andy@chem.ku.dk or etsk@chem.ku.dk') - diff --git a/spaces/Aniquel/WizApp_Code_Generator/app.py b/spaces/Aniquel/WizApp_Code_Generator/app.py deleted file mode 100644 index 38bc6f2be220cad566d7a6613326d43c43899191..0000000000000000000000000000000000000000 --- a/spaces/Aniquel/WizApp_Code_Generator/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr -import openai -import os - -openai.api_key = os.getenv("OPENAI_API_KEY") - - -def generate_response(text): - prompt = f"Code generation:\n\n```python\n{text}\n```" - response = openai.Completion.create( - model=gpt-3.5-turbo, - prompt=prompt, - max_tokens=3000, - n=1, - stop=None, - temperature=0.2, - ) - message = response.choices[0].text.strip() - return message - -iface = gr.Interface( - fn=generate_response, - inputs=gr.inputs.Textbox(label="Enter your code here"), - outputs=gr.outputs.Textbox(label="Chatbot's response"), - title="WizApp Code Generation", - description="Use AI to generate code based on your input", - theme="default" -) - -if __name__ == "__main__": - iface.launch() diff --git a/spaces/Ariharasudhan/YoloV5/utils/segment/general.py b/spaces/Ariharasudhan/YoloV5/utils/segment/general.py deleted file mode 100644 index b526333dc5a1b8625d7e6a51ee6ba41818c62adb..0000000000000000000000000000000000000000 --- a/spaces/Ariharasudhan/YoloV5/utils/segment/general.py +++ /dev/null @@ -1,137 +0,0 @@ -import cv2 -import numpy as np -import torch -import torch.nn.functional as F - - -def crop_mask(masks, boxes): - """ - "Crop" predicted masks by zeroing out everything not in the predicted bbox. - Vectorized by Chong (thanks Chong). 
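    (Note: the code below unpacks ``n, h, w = masks.shape``, so masks are laid
    out as [n, h, w]; the [h, w, n] in Args should read [n, h, w].)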
- - Args: - - masks should be a size [h, w, n] tensor of masks - - boxes should be a size [n, 4] tensor of bbox coords in relative point form - """ - - n, h, w = masks.shape - x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) - r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) - c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) - - return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) - - -def process_mask_upsample(protos, masks_in, bboxes, shape): - """ - Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms - bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) - - return: h, w, n - """ - - c, mh, mw = protos.shape # CHW - masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW - masks = crop_mask(masks, bboxes) # CHW - return masks.gt_(0.5) - - -def process_mask(protos, masks_in, bboxes, shape, upsample=False): - """ - Crop before upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms - bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) - - return: h, w, n - """ - - c, mh, mw = protos.shape # CHW - ih, iw = shape - masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW - - downsampled_bboxes = bboxes.clone() - downsampled_bboxes[:, 0] *= mw / iw - downsampled_bboxes[:, 2] *= mw / iw - downsampled_bboxes[:, 3] *= mh / ih - downsampled_bboxes[:, 1] *= mh / ih - - masks = crop_mask(masks, downsampled_bboxes) # CHW - if upsample: - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW - return masks.gt_(0.5) - - -def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): - """ - img1_shape: model input shape, [h, w] - img0_shape: origin pic shape, [h, w, 3] - masks: [h, w, num] - """ - # Rescale coordinates (xyxy) from im1_shape to im0_shape - if ratio_pad is None: # calculate from im0_shape - gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new - pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding - else: - pad = ratio_pad[1] - top, left = int(pad[1]), int(pad[0]) # y, x - bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) - - if len(masks.shape) < 2: - raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') - masks = masks[top:bottom, left:right] - # masks = masks.permute(2, 0, 1).contiguous() - # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] - # masks = masks.permute(1, 2, 0).contiguous() - masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) - - if len(masks.shape) == 2: - masks = masks[:, :, None] - return masks - - -def mask_iou(mask1, mask2, eps=1e-7): - """ - mask1: [N, n] m1 means number of predicted objects - mask2: [M, n] m2 means number of gt objects - Note: n means image_w x image_h - - return: masks iou, [N, M] - """ - intersection = torch.matmul(mask1, mask2.t()).clamp(0) - union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection - return intersection / (union + eps) - - -def masks_iou(mask1, mask2, eps=1e-7): - """ - mask1: [N, n] m1 means number of predicted objects - mask2: [N, n] m2 means 
number of gt objects - Note: n means image_w x image_h - - return: masks iou, (N, ) - """ - intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) - union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection - return intersection / (union + eps) - - -def masks2segments(masks, strategy='largest'): - # Convert masks(n,160,160) into segments(n,xy) - segments = [] - for x in masks.int().cpu().numpy().astype('uint8'): - c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] - if c: - if strategy == 'concat': # concatenate all segments - c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment - c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) - else: - c = np.zeros((0, 2)) # no segments found - segments.append(c.astype('float32')) - return segments diff --git a/spaces/Artples/llama-2-7b-chat/README.md b/spaces/Artples/llama-2-7b-chat/README.md deleted file mode 100644 index abd16640bb893287e1d68be11d87cff5b3793667..0000000000000000000000000000000000000000 --- a/spaces/Artples/llama-2-7b-chat/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: llama-2-7b-chat -emoji: 🚀 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.37.0 -app_file: app.py -pinned: true -duplicated_from: Artples/llama2-7b-chat -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py deleted file mode 100644 index d16e326024c05a59548619e13258acad781e0a6d..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py +++ /dev/null @@ -1,325 +0,0 @@ -# coding: utf-8 -""" - - webencodings.x_user_defined - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - An implementation of the x-user-defined encoding. - - :copyright: Copyright 2012 by Simon Sapin - :license: BSD, see LICENSE for details. 
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py
deleted file mode 100644
index d16e326024c05a59548619e13258acad781e0a6d..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/webencodings/x_user_defined.py
+++ /dev/null
@@ -1,325 +0,0 @@
-# coding: utf-8
-"""
-
-    webencodings.x_user_defined
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    An implementation of the x-user-defined encoding.
-
-    :copyright: Copyright 2012 by Simon Sapin
-    :license: BSD, see LICENSE for details.
-
-"""
-
-from __future__ import unicode_literals
-
-import codecs
-
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
-    def encode(self, input, errors='strict'):
-        return codecs.charmap_encode(input, errors, encoding_table)
-
-    def decode(self, input, errors='strict'):
-        return codecs.charmap_decode(input, errors, decoding_table)
-
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
-    def encode(self, input, final=False):
-        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
-
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
-    def decode(self, input, final=False):
-        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
-
-
-class StreamWriter(Codec, codecs.StreamWriter):
-    pass
-
-
-class StreamReader(Codec, codecs.StreamReader):
-    pass
-
-
-### encodings module API
-
-codec_info = codecs.CodecInfo(
-    name='x-user-defined',
-    encode=Codec().encode,
-    decode=Codec().decode,
-    incrementalencoder=IncrementalEncoder,
-    incrementaldecoder=IncrementalDecoder,
-    streamreader=StreamReader,
-    streamwriter=StreamWriter,
-)
-
-
-### Decoding Table
-
-# Python 3:
-# for c in range(256): print('    %r' % chr(c if c < 128 else c + 0xF700))
-decoding_table = (
-    '\x00'
-    '\x01'
-    '\x02'
-    '\x03'
-    '\x04'
-    '\x05'
-    '\x06'
-    '\x07'
-    '\x08'
-    '\t'
-    '\n'
-    '\x0b'
-    '\x0c'
-    '\r'
-    '\x0e'
-    '\x0f'
-    '\x10'
-    '\x11'
-    '\x12'
-    '\x13'
-    '\x14'
-    '\x15'
-    '\x16'
-    '\x17'
-    '\x18'
-    '\x19'
-    '\x1a'
-    '\x1b'
-    '\x1c'
-    '\x1d'
-    '\x1e'
-    '\x1f'
-    ' '
-    '!'
-    '"'
-    '#'
-    '$'
-    '%'
-    '&'
-    "'"
-    '('
-    ')'
-    '*'
-    '+'
-    ','
-    '-'
-    '.'
-    '/'
-    '0'
-    '1'
-    '2'
-    '3'
-    '4'
-    '5'
-    '6'
-    '7'
-    '8'
-    '9'
-    ':'
-    ';'
-    '<'
-    '='
-    '>'
-    '?'
-    '@'
-    'A'
-    'B'
-    'C'
-    'D'
-    'E'
-    'F'
-    'G'
-    'H'
-    'I'
-    'J'
-    'K'
-    'L'
-    'M'
-    'N'
-    'O'
-    'P'
-    'Q'
-    'R'
-    'S'
-    'T'
-    'U'
-    'V'
-    'W'
-    'X'
-    'Y'
-    'Z'
-    '['
-    '\\'
-    ']'
-    '^'
-    '_'
-    '`'
-    'a'
-    'b'
-    'c'
-    'd'
-    'e'
-    'f'
-    'g'
-    'h'
-    'i'
-    'j'
-    'k'
-    'l'
-    'm'
-    'n'
-    'o'
-    'p'
-    'q'
-    'r'
-    's'
-    't'
-    'u'
-    'v'
-    'w'
-    'x'
-    'y'
-    'z'
-    '{'
-    '|'
-    '}'
-    '~'
-    '\x7f'
-    '\uf780'
-    '\uf781'
-    '\uf782'
-    '\uf783'
-    '\uf784'
-    '\uf785'
-    '\uf786'
-    '\uf787'
-    '\uf788'
-    '\uf789'
-    '\uf78a'
-    '\uf78b'
-    '\uf78c'
-    '\uf78d'
-    '\uf78e'
-    '\uf78f'
-    '\uf790'
-    '\uf791'
-    '\uf792'
-    '\uf793'
-    '\uf794'
-    '\uf795'
-    '\uf796'
-    '\uf797'
-    '\uf798'
-    '\uf799'
-    '\uf79a'
-    '\uf79b'
-    '\uf79c'
-    '\uf79d'
-    '\uf79e'
-    '\uf79f'
-    '\uf7a0'
-    '\uf7a1'
-    '\uf7a2'
-    '\uf7a3'
-    '\uf7a4'
-    '\uf7a5'
-    '\uf7a6'
-    '\uf7a7'
-    '\uf7a8'
-    '\uf7a9'
-    '\uf7aa'
-    '\uf7ab'
-    '\uf7ac'
-    '\uf7ad'
-    '\uf7ae'
-    '\uf7af'
-    '\uf7b0'
-    '\uf7b1'
-    '\uf7b2'
-    '\uf7b3'
-    '\uf7b4'
-    '\uf7b5'
-    '\uf7b6'
-    '\uf7b7'
-    '\uf7b8'
-    '\uf7b9'
-    '\uf7ba'
-    '\uf7bb'
-    '\uf7bc'
-    '\uf7bd'
-    '\uf7be'
-    '\uf7bf'
-    '\uf7c0'
-    '\uf7c1'
-    '\uf7c2'
-    '\uf7c3'
-    '\uf7c4'
-    '\uf7c5'
-    '\uf7c6'
-    '\uf7c7'
-    '\uf7c8'
-    '\uf7c9'
-    '\uf7ca'
-    '\uf7cb'
-    '\uf7cc'
-    '\uf7cd'
-    '\uf7ce'
-    '\uf7cf'
-    '\uf7d0'
-    '\uf7d1'
-    '\uf7d2'
-    '\uf7d3'
-    '\uf7d4'
-    '\uf7d5'
-    '\uf7d6'
-    '\uf7d7'
-    '\uf7d8'
-    '\uf7d9'
-    '\uf7da'
-    '\uf7db'
-    '\uf7dc'
-    '\uf7dd'
-    '\uf7de'
-    '\uf7df'
-    '\uf7e0'
-    '\uf7e1'
-    '\uf7e2'
-    '\uf7e3'
-    '\uf7e4'
-    '\uf7e5'
-    '\uf7e6'
-    '\uf7e7'
-    '\uf7e8'
-    '\uf7e9'
-    '\uf7ea'
-    '\uf7eb'
-    '\uf7ec'
-    '\uf7ed'
-    '\uf7ee'
-    '\uf7ef'
-    '\uf7f0'
-    '\uf7f1'
-    '\uf7f2'
-    '\uf7f3'
-    '\uf7f4'
-    '\uf7f5'
-    '\uf7f6'
-    '\uf7f7'
-    '\uf7f8'
-    '\uf7f9'
-    '\uf7fa'
-    '\uf7fb'
-    '\uf7fc'
-    '\uf7fd'
-    '\uf7fe'
-    '\uf7ff'
-)
-
-### Encoding table
-encoding_table = codecs.charmap_build(decoding_table)
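
Editor's note: the module above exposes its codec through `codec_info` rather than registering it globally. Below is a minimal sketch of one way to use it, assuming the module is importable as `webencodings.x_user_defined`; note that newer Python versions normalize hyphens to underscores before calling a codec search function, hence the two-name check.

```python
# Minimal sketch: registering and exercising the x-user-defined codec defined above.
import codecs
from webencodings import x_user_defined  # assumed import path

def _search(name):
    # codecs.lookup may normalize 'x-user-defined' to 'x_user_defined'.
    return x_user_defined.codec_info if name in ('x-user-defined', 'x_user_defined') else None

codecs.register(_search)

# ASCII bytes decode to themselves; bytes 0x80-0xFF map to U+F780-U+F7FF.
print(b'abc\x80'.decode('x-user-defined'))  # 'abc\uf780'
print('\uf7ff'.encode('x-user-defined'))    # b'\xff'
```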
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py deleted file mode 100644 index ad5ee31ef53370fe7ec95799db390a33c3680b3b..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py +++ /dev/null @@ -1,1035 +0,0 @@ -# noqa -# type: ignore -# flake8: noqa -# pylint: skip-file -# mypy: ignore-errors -# yapf: disable -# pylama:skip=1 - - -# *** PLEASE DO NOT MODIFY DIRECTLY: Automatically generated code *** - - -VERSION = "2.15.3" -import re -from .fastjsonschema_exceptions import JsonSchemaValueException - - -REGEX_PATTERNS = { - '^.*$': re.compile('^.*$'), - '.+': re.compile('.+'), - '^.+$': re.compile('^.+$'), - 'idn-email_re_pattern': re.compile('^[^@]+@[^@]+\\.[^@]+\\Z') -} - -NoneType = type(None) - -def validate(data, custom_formats={}, name_prefix=None): - validate_https___packaging_python_org_en_latest_specifications_declaring_build_dependencies(data, custom_formats, (name_prefix or "data") + "") - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_build_dependencies(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-build-dependencies/', 'title': 'Data structure for ``pyproject.toml`` files', '$$description': ['File format containing build-time configurations for the Python ecosystem. ', ':pep:`517` initially defined a build-system independent format for source trees', 'which was complemented by :pep:`518` to provide a way of specifying dependencies ', 'for building Python projects.', 'Please notice the ``project`` table (as initially defined in :pep:`621`) is not included', 'in this schema and should be considered separately.'], 'type': 'object', 'additionalProperties': False, 'properties': {'build-system': {'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. 
Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
_core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, 'tool': {'type': 'object', 'properties': {'distutils': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, 'setuptools': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 
'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$ref': '#/definitions/find-directive'}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this 
don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'$ref': '#/definitions/attr-directive'}, {'$ref': '#/definitions/file-directive'}]}, 'classifiers': {'$ref': '#/definitions/file-directive'}, 'description': {'$ref': '#/definitions/file-directive'}, 'dependencies': {'$ref': '#/definitions/file-directive'}, 'entry-points': {'$ref': '#/definitions/file-directive'}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$ref': '#/definitions/file-directive'}}}, 'readme': {'anyOf': [{'$ref': '#/definitions/file-directive'}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. 
``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}}}}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
_core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - if "build-system" in data_keys: - data_keys.remove("build-system") - data__buildsystem = data["build-system"] - if not isinstance(data__buildsystem, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system must be object", value=data__buildsystem, name="" + (name_prefix or "data") + ".build-system", definition={'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, rule='type') - data__buildsystem_is_dict = isinstance(data__buildsystem, dict) - if data__buildsystem_is_dict: - data__buildsystem_len = len(data__buildsystem) - if not all(prop in data__buildsystem for prop in ['requires']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system must contain ['requires'] properties", value=data__buildsystem, name="" + (name_prefix or "data") + ".build-system", definition={'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. 
Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, rule='required') - data__buildsystem_keys = set(data__buildsystem.keys()) - if "requires" in data__buildsystem_keys: - data__buildsystem_keys.remove("requires") - data__buildsystem__requires = data__buildsystem["requires"] - if not isinstance(data__buildsystem__requires, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.requires must be array", value=data__buildsystem__requires, name="" + (name_prefix or "data") + ".build-system.requires", definition={'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, rule='type') - data__buildsystem__requires_is_list = isinstance(data__buildsystem__requires, (list, tuple)) - if data__buildsystem__requires_is_list: - data__buildsystem__requires_len = len(data__buildsystem__requires) - for data__buildsystem__requires_x, data__buildsystem__requires_item in enumerate(data__buildsystem__requires): - if not isinstance(data__buildsystem__requires_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.requires[{data__buildsystem__requires_x}]".format(**locals()) + " must be string", value=data__buildsystem__requires_item, name="" + (name_prefix or "data") + ".build-system.requires[{data__buildsystem__requires_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "build-backend" in data__buildsystem_keys: - data__buildsystem_keys.remove("build-backend") - data__buildsystem__buildbackend = data__buildsystem["build-backend"] - if not isinstance(data__buildsystem__buildbackend, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.build-backend must be string", value=data__buildsystem__buildbackend, name="" + (name_prefix or "data") + ".build-system.build-backend", definition={'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, rule='type') - if isinstance(data__buildsystem__buildbackend, str): - if not custom_formats["pep517-backend-reference"](data__buildsystem__buildbackend): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.build-backend must be pep517-backend-reference", value=data__buildsystem__buildbackend, name="" + (name_prefix or "data") + ".build-system.build-backend", definition={'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, rule='format') - if "backend-path" in data__buildsystem_keys: - data__buildsystem_keys.remove("backend-path") - data__buildsystem__backendpath = data__buildsystem["backend-path"] - if not isinstance(data__buildsystem__backendpath, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + 
".build-system.backend-path must be array", value=data__buildsystem__backendpath, name="" + (name_prefix or "data") + ".build-system.backend-path", definition={'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}, rule='type') - data__buildsystem__backendpath_is_list = isinstance(data__buildsystem__backendpath, (list, tuple)) - if data__buildsystem__backendpath_is_list: - data__buildsystem__backendpath_len = len(data__buildsystem__backendpath) - for data__buildsystem__backendpath_x, data__buildsystem__backendpath_item in enumerate(data__buildsystem__backendpath): - if not isinstance(data__buildsystem__backendpath_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.backend-path[{data__buildsystem__backendpath_x}]".format(**locals()) + " must be string", value=data__buildsystem__backendpath_item, name="" + (name_prefix or "data") + ".build-system.backend-path[{data__buildsystem__backendpath_x}]".format(**locals()) + "", definition={'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}, rule='type') - if data__buildsystem_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system must not contain "+str(data__buildsystem_keys)+" properties", value=data__buildsystem, name="" + (name_prefix or "data") + ".build-system", definition={'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. 
Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, rule='additionalProperties') - if "project" in data_keys: - data_keys.remove("project") - data__project = data["project"] - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata(data__project, custom_formats, (name_prefix or "data") + ".project") - if "tool" in data_keys: - data_keys.remove("tool") - data__tool = data["tool"] - if not isinstance(data__tool, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".tool must be object", value=data__tool, name="" + (name_prefix or "data") + ".tool", definition={'type': 'object', 'properties': {'distutils': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, 'setuptools': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 
'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$ref': '#/definitions/find-directive'}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. 
See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'$ref': '#/definitions/attr-directive'}, {'$ref': '#/definitions/file-directive'}]}, 'classifiers': {'$ref': '#/definitions/file-directive'}, 'description': {'$ref': '#/definitions/file-directive'}, 'dependencies': {'$ref': '#/definitions/file-directive'}, 'entry-points': {'$ref': '#/definitions/file-directive'}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$ref': '#/definitions/file-directive'}}}, 'readme': {'anyOf': [{'$ref': '#/definitions/file-directive'}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. 
``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}}}, rule='type') - data__tool_is_dict = isinstance(data__tool, dict) - if data__tool_is_dict: - data__tool_keys = set(data__tool.keys()) - if "distutils" in data__tool_keys: - data__tool_keys.remove("distutils") - data__tool__distutils = data__tool["distutils"] - validate_https___docs_python_org_3_install(data__tool__distutils, custom_formats, (name_prefix or "data") + ".tool.distutils") - if "setuptools" in data__tool_keys: - data__tool_keys.remove("setuptools") - data__tool__setuptools = data__tool["setuptools"] - validate_https___setuptools_pypa_io_en_latest_references_keywords_html(data__tool__setuptools, custom_formats, (name_prefix or "data") + ".tool.setuptools") - if data_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-build-dependencies/', 'title': 'Data structure for ``pyproject.toml`` files', '$$description': ['File format containing build-time configurations for the Python ecosystem. ', ':pep:`517` initially defined a build-system independent format for source trees', 'which was complemented by :pep:`518` to provide a way of specifying dependencies ', 'for building Python projects.', 'Please notice the ``project`` table (as initially defined in :pep:`621`) is not included', 'in this schema and should be considered separately.'], 'type': 'object', 'additionalProperties': False, 'properties': {'build-system': {'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. 
MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
_core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, 'tool': {'type': 'object', 'properties': {'distutils': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, 'setuptools': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 
'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$ref': '#/definitions/find-directive'}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this
doesn't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'$ref': '#/definitions/attr-directive'}, {'$ref': '#/definitions/file-directive'}]}, 'classifiers': {'$ref': '#/definitions/file-directive'}, 'description': {'$ref': '#/definitions/file-directive'}, 'dependencies': {'$ref': '#/definitions/file-directive'}, 'entry-points': {'$ref': '#/definitions/file-directive'}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$ref': '#/definitions/file-directive'}}}, 'readme': {'anyOf': [{'$ref': '#/definitions/file-directive'}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can contain shell-style wildcards (e.g. 
``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}}}}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
_core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='additionalProperties') - return data - -def validate_https___setuptools_pypa_io_en_latest_references_keywords_html(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be 
removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array',
'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this doesn't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. 
Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. 
Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - if "platforms" in data_keys: - data_keys.remove("platforms") - data__platforms = data["platforms"] - if not isinstance(data__platforms, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".platforms must be array", value=data__platforms, name="" + (name_prefix or "data") + ".platforms", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__platforms_is_list = isinstance(data__platforms, (list, tuple)) - if data__platforms_is_list: - data__platforms_len = len(data__platforms) - for data__platforms_x, data__platforms_item in enumerate(data__platforms): - if not isinstance(data__platforms_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".platforms[{data__platforms_x}]".format(**locals()) + " must be string", value=data__platforms_item, name="" + (name_prefix or "data") + ".platforms[{data__platforms_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "provides" in data_keys: - data_keys.remove("provides") - data__provides = data["provides"] - if not isinstance(data__provides, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides must be array", value=data__provides, name="" + (name_prefix or "data") + ".provides", definition={'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, rule='type') - data__provides_is_list = isinstance(data__provides, (list, tuple)) - if data__provides_is_list: - data__provides_len = len(data__provides) - for data__provides_x, data__provides_item in enumerate(data__provides): - if not isinstance(data__provides_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + " must be string", value=data__provides_item, name="" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='type') - if isinstance(data__provides_item, str): - if not custom_formats["pep508-identifier"](data__provides_item): - raise JsonSchemaValueException(""
+ (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + " must be pep508-identifier", value=data__provides_item, name="" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='format') - if "obsoletes" in data_keys: - data_keys.remove("obsoletes") - data__obsoletes = data["obsoletes"] - if not isinstance(data__obsoletes, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes must be array", value=data__obsoletes, name="" + (name_prefix or "data") + ".obsoletes", definition={'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, rule='type') - data__obsoletes_is_list = isinstance(data__obsoletes, (list, tuple)) - if data__obsoletes_is_list: - data__obsoletes_len = len(data__obsoletes) - for data__obsoletes_x, data__obsoletes_item in enumerate(data__obsoletes): - if not isinstance(data__obsoletes_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + " must be string", value=data__obsoletes_item, name="" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='type') - if isinstance(data__obsoletes_item, str): - if not custom_formats["pep508-identifier"](data__obsoletes_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + " must be pep508-identifier", value=data__obsoletes_item, name="" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='format') - if "zip-safe" in data_keys: - data_keys.remove("zip-safe") - data__zipsafe = data["zip-safe"] - if not isinstance(data__zipsafe, (bool)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".zip-safe must be boolean", value=data__zipsafe, name="" + (name_prefix or "data") + ".zip-safe", definition={'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, rule='type') - if "script-files" in data_keys: - data_keys.remove("script-files") - data__scriptfiles = data["script-files"] - if not isinstance(data__scriptfiles, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".script-files must be array", value=data__scriptfiles, name="" + (name_prefix or "data") + ".script-files", definition={'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, rule='type') - data__scriptfiles_is_list = isinstance(data__scriptfiles, (list, tuple)) - if data__scriptfiles_is_list: - data__scriptfiles_len = len(data__scriptfiles) - for data__scriptfiles_x, data__scriptfiles_item in enumerate(data__scriptfiles): - if not isinstance(data__scriptfiles_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".script-files[{data__scriptfiles_x}]".format(**locals()) + " must be string", value=data__scriptfiles_item, name="" + (name_prefix or "data") + ".script-files[{data__scriptfiles_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "eager-resources" in data_keys: - 
data_keys.remove("eager-resources") - data__eagerresources = data["eager-resources"] - if not isinstance(data__eagerresources, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".eager-resources must be array", value=data__eagerresources, name="" + (name_prefix or "data") + ".eager-resources", definition={'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__eagerresources_is_list = isinstance(data__eagerresources, (list, tuple)) - if data__eagerresources_is_list: - data__eagerresources_len = len(data__eagerresources) - for data__eagerresources_x, data__eagerresources_item in enumerate(data__eagerresources): - if not isinstance(data__eagerresources_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".eager-resources[{data__eagerresources_x}]".format(**locals()) + " must be string", value=data__eagerresources_item, name="" + (name_prefix or "data") + ".eager-resources[{data__eagerresources_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "packages" in data_keys: - data_keys.remove("packages") - data__packages = data["packages"] - data__packages_one_of_count1 = 0 - if data__packages_one_of_count1 < 2: - try: - if not isinstance(data__packages, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages must be array", value=data__packages, name="" + (name_prefix or "data") + ".packages", definition={'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, rule='type') - data__packages_is_list = isinstance(data__packages, (list, tuple)) - if data__packages_is_list: - data__packages_len = len(data__packages) - for data__packages_x, data__packages_item in enumerate(data__packages): - if not isinstance(data__packages_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + " must be string", value=data__packages_item, name="" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type') - if isinstance(data__packages_item, str): - if not custom_formats["python-module-name"](data__packages_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + " must be python-module-name", value=data__packages_item, name="" + (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format') - data__packages_one_of_count1 += 1 - except JsonSchemaValueException: pass - if data__packages_one_of_count1 < 2: - try: - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive(data__packages, custom_formats, (name_prefix or "data") + ".packages") - data__packages_one_of_count1 += 1 - except JsonSchemaValueException: pass - if data__packages_one_of_count1 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages must be valid exactly by one definition" + (" (" + str(data__packages_one_of_count1) + " matches found)"), value=data__packages, name="" + (name_prefix or "data") + ".packages", definition={'$$description': ['Packages that should be included in the distribution.', 'It 
can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can contain shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, rule='oneOf') - if "package-dir" in data_keys: - data_keys.remove("package-dir") - data__packagedir = data["package-dir"] - if not isinstance(data__packagedir, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be object", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='type') - data__packagedir_is_dict = isinstance(data__packagedir, dict) - if data__packagedir_is_dict: - data__packagedir_keys = set(data__packagedir.keys()) - for data__packagedir_key, data__packagedir_val in data__packagedir.items(): - if REGEX_PATTERNS['^.*$'].search(data__packagedir_key): - if data__packagedir_key in data__packagedir_keys: - data__packagedir_keys.remove(data__packagedir_key) - if not isinstance(data__packagedir_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir.{data__packagedir_key}".format(**locals()) + " must be string", value=data__packagedir_val, name="" + (name_prefix or "data") + ".package-dir.{data__packagedir_key}".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if data__packagedir_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must not contain "+str(data__packagedir_keys)+" properties", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all
packages contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='additionalProperties') - data__packagedir_len = len(data__packagedir) - if data__packagedir_len != 0: - data__packagedir_property_names = True - for data__packagedir_key in data__packagedir: - try: - data__packagedir_key_one_of_count2 = 0 - if data__packagedir_key_one_of_count2 < 2: - try: - if isinstance(data__packagedir_key, str): - if not custom_formats["python-module-name"](data__packagedir_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be python-module-name", value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'format': 'python-module-name'}, rule='format') - data__packagedir_key_one_of_count2 += 1 - except JsonSchemaValueException: pass - if data__packagedir_key_one_of_count2 < 2: - try: - if data__packagedir_key != "": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be same as const definition: ", value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'const': ''}, rule='const') - data__packagedir_key_one_of_count2 += 1 - except JsonSchemaValueException: pass - if data__packagedir_key_one_of_count2 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be valid exactly by one definition" + (" (" + str(data__packagedir_key_one_of_count2) + " matches found)"), value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, rule='oneOf') - except JsonSchemaValueException: - data__packagedir_property_names = False - if not data__packagedir_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be named by propertyName definition", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='propertyNames') - if "package-data" in data_keys: - data_keys.remove("package-data") - data__packagedata = data["package-data"] - if not isinstance(data__packagedata, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be object", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type') - data__packagedata_is_dict = isinstance(data__packagedata, dict) - if data__packagedata_is_dict: - data__packagedata_keys = 
set(data__packagedata.keys()) - for data__packagedata_key, data__packagedata_val in data__packagedata.items(): - if REGEX_PATTERNS['^.*$'].search(data__packagedata_key): - if data__packagedata_key in data__packagedata_keys: - data__packagedata_keys.remove(data__packagedata_key) - if not isinstance(data__packagedata_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data.{data__packagedata_key}".format(**locals()) + " must be array", value=data__packagedata_val, name="" + (name_prefix or "data") + ".package-data.{data__packagedata_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__packagedata_val_is_list = isinstance(data__packagedata_val, (list, tuple)) - if data__packagedata_val_is_list: - data__packagedata_val_len = len(data__packagedata_val) - for data__packagedata_val_x, data__packagedata_val_item in enumerate(data__packagedata_val): - if not isinstance(data__packagedata_val_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data.{data__packagedata_key}[{data__packagedata_val_x}]".format(**locals()) + " must be string", value=data__packagedata_val_item, name="" + (name_prefix or "data") + ".package-data.{data__packagedata_key}[{data__packagedata_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if data__packagedata_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must not contain "+str(data__packagedata_keys)+" properties", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='additionalProperties') - data__packagedata_len = len(data__packagedata) - if data__packagedata_len != 0: - data__packagedata_property_names = True - for data__packagedata_key in data__packagedata: - try: - data__packagedata_key_one_of_count3 = 0 - if data__packagedata_key_one_of_count3 < 2: - try: - if isinstance(data__packagedata_key, str): - if not custom_formats["python-module-name"](data__packagedata_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be python-module-name", value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'format': 'python-module-name'}, rule='format') - data__packagedata_key_one_of_count3 += 1 - except JsonSchemaValueException: pass - if data__packagedata_key_one_of_count3 < 2: - try: - if data__packagedata_key != "*": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be same as const definition: *", value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'const': '*'}, rule='const') - data__packagedata_key_one_of_count3 += 1 - except JsonSchemaValueException: pass - if data__packagedata_key_one_of_count3 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be valid exactly by one definition" + (" (" + str(data__packagedata_key_one_of_count3) + " matches found)"), value=data__packagedata_key, name="" + 
(name_prefix or "data") + ".package-data", definition={'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, rule='oneOf') - except JsonSchemaValueException: - data__packagedata_property_names = False - if not data__packagedata_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be named by propertyName definition", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='propertyNames') - if "include-package-data" in data_keys: - data_keys.remove("include-package-data") - data__includepackagedata = data["include-package-data"] - if not isinstance(data__includepackagedata, (bool)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".include-package-data must be boolean", value=data__includepackagedata, name="" + (name_prefix or "data") + ".include-package-data", definition={'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, rule='type') - if "exclude-package-data" in data_keys: - data_keys.remove("exclude-package-data") - data__excludepackagedata = data["exclude-package-data"] - if not isinstance(data__excludepackagedata, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be object", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type') - data__excludepackagedata_is_dict = isinstance(data__excludepackagedata, dict) - if data__excludepackagedata_is_dict: - data__excludepackagedata_keys = set(data__excludepackagedata.keys()) - for data__excludepackagedata_key, data__excludepackagedata_val in data__excludepackagedata.items(): - if REGEX_PATTERNS['^.*$'].search(data__excludepackagedata_key): - if data__excludepackagedata_key in data__excludepackagedata_keys: - data__excludepackagedata_keys.remove(data__excludepackagedata_key) - if not isinstance(data__excludepackagedata_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}".format(**locals()) + " must be array", value=data__excludepackagedata_val, name="" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__excludepackagedata_val_is_list = isinstance(data__excludepackagedata_val, (list, tuple)) - if data__excludepackagedata_val_is_list: - data__excludepackagedata_val_len = 
len(data__excludepackagedata_val) - for data__excludepackagedata_val_x, data__excludepackagedata_val_item in enumerate(data__excludepackagedata_val): - if not isinstance(data__excludepackagedata_val_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}[{data__excludepackagedata_val_x}]".format(**locals()) + " must be string", value=data__excludepackagedata_val_item, name="" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}[{data__excludepackagedata_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if data__excludepackagedata_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must not contain "+str(data__excludepackagedata_keys)+" properties", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='additionalProperties') - data__excludepackagedata_len = len(data__excludepackagedata) - if data__excludepackagedata_len != 0: - data__excludepackagedata_property_names = True - for data__excludepackagedata_key in data__excludepackagedata: - try: - data__excludepackagedata_key_one_of_count4 = 0 - if data__excludepackagedata_key_one_of_count4 < 2: - try: - if isinstance(data__excludepackagedata_key, str): - if not custom_formats["python-module-name"](data__excludepackagedata_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be python-module-name", value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'format': 'python-module-name'}, rule='format') - data__excludepackagedata_key_one_of_count4 += 1 - except JsonSchemaValueException: pass - if data__excludepackagedata_key_one_of_count4 < 2: - try: - if data__excludepackagedata_key != "*": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be same as const definition: *", value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'const': '*'}, rule='const') - data__excludepackagedata_key_one_of_count4 += 1 - except JsonSchemaValueException: pass - if data__excludepackagedata_key_one_of_count4 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be valid exactly by one definition" + (" (" + str(data__excludepackagedata_key_one_of_count4) + " matches found)"), value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, rule='oneOf') - except JsonSchemaValueException: - data__excludepackagedata_property_names = False - if not data__excludepackagedata_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be named by propertyName definition", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be 
excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='propertyNames')
-        if "namespace-packages" in data_keys:
-            data_keys.remove("namespace-packages")
-            data__namespacepackages = data["namespace-packages"]
-            if not isinstance(data__namespacepackages, (list, tuple)):
-                raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages must be array", value=data__namespacepackages, name="" + (name_prefix or "data") + ".namespace-packages", definition={'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, rule='type')
-            data__namespacepackages_is_list = isinstance(data__namespacepackages, (list, tuple))
-            if data__namespacepackages_is_list:
-                data__namespacepackages_len = len(data__namespacepackages)
-                for data__namespacepackages_x, data__namespacepackages_item in enumerate(data__namespacepackages):
-                    if not isinstance(data__namespacepackages_item, (str)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + " must be string", value=data__namespacepackages_item, name="" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type')
-                    if isinstance(data__namespacepackages_item, str):
-                        if not custom_formats["python-module-name"](data__namespacepackages_item):
-                            raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + " must be python-module-name", value=data__namespacepackages_item, name="" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format')
-        if "py-modules" in data_keys:
-            data_keys.remove("py-modules")
-            data__pymodules = data["py-modules"]
-            if not isinstance(data__pymodules, (list, tuple)):
-                raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules must be array", value=data__pymodules, name="" + (name_prefix or "data") + ".py-modules", definition={'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, rule='type')
-            data__pymodules_is_list = isinstance(data__pymodules, (list, tuple))
-            if data__pymodules_is_list:
-                data__pymodules_len = len(data__pymodules)
-                for data__pymodules_x, data__pymodules_item in enumerate(data__pymodules):
-                    if not isinstance(data__pymodules_item, (str)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + " must be string", value=data__pymodules_item, name="" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type')
-                    if isinstance(data__pymodules_item, str):
-                        if not custom_formats["python-module-name"](data__pymodules_item):
-                            raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + " 
must be python-module-name", value=data__pymodules_item, name="" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format') - if "data-files" in data_keys: - data_keys.remove("data-files") - data__datafiles = data["data-files"] - if not isinstance(data__datafiles, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files must be object", value=data__datafiles, name="" + (name_prefix or "data") + ".data-files", definition={'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type') - data__datafiles_is_dict = isinstance(data__datafiles, dict) - if data__datafiles_is_dict: - data__datafiles_keys = set(data__datafiles.keys()) - for data__datafiles_key, data__datafiles_val in data__datafiles.items(): - if REGEX_PATTERNS['^.*$'].search(data__datafiles_key): - if data__datafiles_key in data__datafiles_keys: - data__datafiles_keys.remove(data__datafiles_key) - if not isinstance(data__datafiles_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files.{data__datafiles_key}".format(**locals()) + " must be array", value=data__datafiles_val, name="" + (name_prefix or "data") + ".data-files.{data__datafiles_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type') - data__datafiles_val_is_list = isinstance(data__datafiles_val, (list, tuple)) - if data__datafiles_val_is_list: - data__datafiles_val_len = len(data__datafiles_val) - for data__datafiles_val_x, data__datafiles_val_item in enumerate(data__datafiles_val): - if not isinstance(data__datafiles_val_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files.{data__datafiles_key}[{data__datafiles_val_x}]".format(**locals()) + " must be string", value=data__datafiles_val_item, name="" + (name_prefix or "data") + ".data-files.{data__datafiles_key}[{data__datafiles_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "cmdclass" in data_keys: - data_keys.remove("cmdclass") - data__cmdclass = data["cmdclass"] - if not isinstance(data__cmdclass, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass must be object", value=data__cmdclass, name="" + (name_prefix or "data") + ".cmdclass", definition={'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, rule='type') - data__cmdclass_is_dict = isinstance(data__cmdclass, dict) - if data__cmdclass_is_dict: - data__cmdclass_keys = set(data__cmdclass.keys()) - for data__cmdclass_key, data__cmdclass_val in data__cmdclass.items(): - if REGEX_PATTERNS['^.*$'].search(data__cmdclass_key): - if data__cmdclass_key in data__cmdclass_keys: - 
data__cmdclass_keys.remove(data__cmdclass_key) - if not isinstance(data__cmdclass_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + " must be string", value=data__cmdclass_val, name="" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'python-qualified-identifier'}, rule='type') - if isinstance(data__cmdclass_val, str): - if not custom_formats["python-qualified-identifier"](data__cmdclass_val): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + " must be python-qualified-identifier", value=data__cmdclass_val, name="" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'python-qualified-identifier'}, rule='format') - if "license-files" in data_keys: - data_keys.remove("license-files") - data__licensefiles = data["license-files"] - if not isinstance(data__licensefiles, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license-files must be array", value=data__licensefiles, name="" + (name_prefix or "data") + ".license-files", definition={'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, rule='type') - data__licensefiles_is_list = isinstance(data__licensefiles, (list, tuple)) - if data__licensefiles_is_list: - data__licensefiles_len = len(data__licensefiles) - for data__licensefiles_x, data__licensefiles_item in enumerate(data__licensefiles): - if not isinstance(data__licensefiles_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license-files[{data__licensefiles_x}]".format(**locals()) + " must be string", value=data__licensefiles_item, name="" + (name_prefix or "data") + ".license-files[{data__licensefiles_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - else: data["license-files"] = ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'] - if "dynamic" in data_keys: - data_keys.remove("dynamic") - data__dynamic = data["dynamic"] - if not isinstance(data__dynamic, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be object", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. 
Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}, rule='type') - data__dynamic_is_dict = isinstance(data__dynamic, dict) - if data__dynamic_is_dict: - data__dynamic_keys = set(data__dynamic.keys()) - if "version" in data__dynamic_keys: - data__dynamic_keys.remove("version") - data__dynamic__version = data__dynamic["version"] - data__dynamic__version_one_of_count5 = 0 - if data__dynamic__version_one_of_count5 < 2: - try: - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive(data__dynamic__version, custom_formats, (name_prefix or "data") + ".dynamic.version") - data__dynamic__version_one_of_count5 += 1 - except JsonSchemaValueException: pass - if data__dynamic__version_one_of_count5 < 2: - try: - 
validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__version, custom_formats, (name_prefix or "data") + ".dynamic.version") - data__dynamic__version_one_of_count5 += 1 - except JsonSchemaValueException: pass - if data__dynamic__version_one_of_count5 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.version must be valid exactly by one definition" + (" (" + str(data__dynamic__version_one_of_count5) + " matches found)"), value=data__dynamic__version, name="" + (name_prefix or "data") + ".dynamic.version", definition={'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, rule='oneOf') - if "classifiers" in data__dynamic_keys: - data__dynamic_keys.remove("classifiers") - data__dynamic__classifiers = data__dynamic["classifiers"] - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__classifiers, custom_formats, (name_prefix or "data") + ".dynamic.classifiers") - if "description" in data__dynamic_keys: - data__dynamic_keys.remove("description") - data__dynamic__description = data__dynamic["description"] - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__description, custom_formats, (name_prefix or "data") + ".dynamic.description") - if "dependencies" in data__dynamic_keys: - data__dynamic_keys.remove("dependencies") - data__dynamic__dependencies = data__dynamic["dependencies"] - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__dependencies, custom_formats, (name_prefix or "data") + ".dynamic.dependencies") - if "entry-points" in data__dynamic_keys: - data__dynamic_keys.remove("entry-points") - data__dynamic__entrypoints = data__dynamic["entry-points"] - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__entrypoints, custom_formats, (name_prefix or "data") + ".dynamic.entry-points") - if "optional-dependencies" in data__dynamic_keys: - data__dynamic_keys.remove("optional-dependencies") - data__dynamic__optionaldependencies = data__dynamic["optional-dependencies"] - if not isinstance(data__dynamic__optionaldependencies, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must be object", value=data__dynamic__optionaldependencies, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from 
a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, rule='type') - data__dynamic__optionaldependencies_is_dict = isinstance(data__dynamic__optionaldependencies, dict) - if data__dynamic__optionaldependencies_is_dict: - data__dynamic__optionaldependencies_keys = set(data__dynamic__optionaldependencies.keys()) - for data__dynamic__optionaldependencies_key, data__dynamic__optionaldependencies_val in data__dynamic__optionaldependencies.items(): - if REGEX_PATTERNS['.+'].search(data__dynamic__optionaldependencies_key): - if data__dynamic__optionaldependencies_key in data__dynamic__optionaldependencies_keys: - data__dynamic__optionaldependencies_keys.remove(data__dynamic__optionaldependencies_key) - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__optionaldependencies_val, custom_formats, (name_prefix or "data") + ".dynamic.optional-dependencies.{data__dynamic__optionaldependencies_key}") - if data__dynamic__optionaldependencies_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must not contain "+str(data__dynamic__optionaldependencies_keys)+" properties", value=data__dynamic__optionaldependencies, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, rule='additionalProperties') - data__dynamic__optionaldependencies_len = len(data__dynamic__optionaldependencies) - if data__dynamic__optionaldependencies_len != 0: - data__dynamic__optionaldependencies_property_names = True - for data__dynamic__optionaldependencies_key in data__dynamic__optionaldependencies: - try: - if isinstance(data__dynamic__optionaldependencies_key, str): - if not custom_formats["python-identifier"](data__dynamic__optionaldependencies_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must be python-identifier", value=data__dynamic__optionaldependencies_key, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'format': 'python-identifier'}, rule='format') - except JsonSchemaValueException: - data__dynamic__optionaldependencies_property_names = False - if not data__dynamic__optionaldependencies_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must be named by propertyName definition", value=data__dynamic__optionaldependencies, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': 
{'type': 'string'}}]}}, 'required': ['file']}}}, rule='propertyNames') - if "readme" in data__dynamic_keys: - data__dynamic_keys.remove("readme") - data__dynamic__readme = data__dynamic["readme"] - data__dynamic__readme_any_of_count6 = 0 - if not data__dynamic__readme_any_of_count6: - try: - validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__readme, custom_formats, (name_prefix or "data") + ".dynamic.readme") - data__dynamic__readme_any_of_count6 += 1 - except JsonSchemaValueException: pass - if not data__dynamic__readme_any_of_count6: - try: - data__dynamic__readme_is_dict = isinstance(data__dynamic__readme, dict) - if data__dynamic__readme_is_dict: - data__dynamic__readme_keys = set(data__dynamic__readme.keys()) - if "content-type" in data__dynamic__readme_keys: - data__dynamic__readme_keys.remove("content-type") - data__dynamic__readme__contenttype = data__dynamic__readme["content-type"] - if not isinstance(data__dynamic__readme__contenttype, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.readme.content-type must be string", value=data__dynamic__readme__contenttype, name="" + (name_prefix or "data") + ".dynamic.readme.content-type", definition={'type': 'string'}, rule='type') - data__dynamic__readme_any_of_count6 += 1 - except JsonSchemaValueException: pass - if not data__dynamic__readme_any_of_count6: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.readme cannot be validated by any definition", value=data__dynamic__readme, name="" + (name_prefix or "data") + ".dynamic.readme", definition={'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}, rule='anyOf') - data__dynamic__readme_is_dict = isinstance(data__dynamic__readme, dict) - if data__dynamic__readme_is_dict: - data__dynamic__readme_len = len(data__dynamic__readme) - if not all(prop in data__dynamic__readme for prop in ['file']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.readme must contain ['file'] properties", value=data__dynamic__readme, name="" + (name_prefix or "data") + ".dynamic.readme", definition={'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}, rule='required') - if data__dynamic_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must not contain "+str(data__dynamic_keys)+" properties", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. 
Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}, rule='additionalProperties') - if data_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring 
builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. 
``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': ''}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).'], 'default': ['LICEN[CS]E*', ' COPYING*', ' NOTICE*', 'AUTHORS*'], '$comment': 'TODO: revise if PEP 639 is accepted. 
Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 
'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}, rule='additionalProperties')
-    return data
-
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data, custom_formats={}, name_prefix=None):
-    if not isinstance(data, (dict)):
-        raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, rule='type')
-    data_is_dict = isinstance(data, dict)
-    if data_is_dict:
-        data_len = len(data)
-        if not all(prop in data for prop in ['file']):
-            raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['file'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, rule='required')
-        data_keys = set(data.keys())
-        if "file" in data_keys:
-            data_keys.remove("file")
-            data__file = data["file"]
-            data__file_one_of_count7 = 0
-            if data__file_one_of_count7 < 2:
-                try:
-                    if not isinstance(data__file, (str)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".file must be string", value=data__file, name="" + (name_prefix or "data") + ".file", definition={'type': 'string'}, rule='type')
-                    data__file_one_of_count7 += 1
-                except JsonSchemaValueException: pass
-            if data__file_one_of_count7 < 2:
-                try:
-                    if not isinstance(data__file, (list, tuple)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".file must be array", value=data__file, name="" + (name_prefix or "data") + ".file", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type')
-                    data__file_is_list = isinstance(data__file, (list, tuple))
-                    if data__file_is_list:
-                        data__file_len = len(data__file)
-                        for data__file_x, data__file_item in enumerate(data__file):
-                            if not isinstance(data__file_item, (str)):
-                                raise JsonSchemaValueException("" + (name_prefix or "data") + ".file[{data__file_x}]".format(**locals()) + " must be string", value=data__file_item, name="" + (name_prefix or "data") + ".file[{data__file_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
-                    data__file_one_of_count7 += 1
-                except JsonSchemaValueException: pass
-            if data__file_one_of_count7 != 1:
-                raise JsonSchemaValueException("" + (name_prefix or "data") + ".file must be valid exactly by one definition" + (" (" + str(data__file_one_of_count7) + " matches found)"), value=data__file, name="" + (name_prefix or "data") + ".file", definition={'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}, rule='oneOf')
-        if data_keys:
-            raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, rule='additionalProperties')
-    return data
-
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive(data, custom_formats={}, name_prefix=None):
-    if not isinstance(data, (dict)):
-        raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, rule='type')
-    data_is_dict = isinstance(data, dict)
-    if data_is_dict:
-        data_len = len(data)
-        if not all(prop in data for prop in ['attr']):
-            raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['attr'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, rule='required')
-        data_keys = set(data.keys())
-        if "attr" in data_keys:
-            data_keys.remove("attr")
-            data__attr = data["attr"]
-            if not isinstance(data__attr, (str)):
-                raise JsonSchemaValueException("" + (name_prefix or "data") + ".attr must be string", value=data__attr, name="" + (name_prefix or "data") + ".attr", definition={'type': 'string'}, rule='type')
-        if data_keys:
-            raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, rule='additionalProperties')
-    return data
-
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive(data, custom_formats={}, name_prefix=None):
-    if not isinstance(data, (dict)):
-        raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}, rule='type')
-    data_is_dict = isinstance(data, dict)
-    if data_is_dict:
-        data_keys = set(data.keys())
-        if "find" in data_keys:
-            data_keys.remove("find")
-            data__find = data["find"]
-            if not isinstance(data__find, (dict)):
-                raise JsonSchemaValueException("" + (name_prefix or "data") + ".find must be object", value=data__find, name="" + (name_prefix or "data") + ".find", definition={'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}, rule='type')
-            data__find_is_dict = isinstance(data__find, dict)
-            if data__find_is_dict:
-                data__find_keys = set(data__find.keys())
-                if "where" in data__find_keys:
-                    data__find_keys.remove("where")
-                    data__find__where = data__find["where"]
-                    if not isinstance(data__find__where, (list, tuple)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.where must be array", value=data__find__where, name="" + (name_prefix or "data") + ".find.where", definition={'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, rule='type')
-                    data__find__where_is_list = isinstance(data__find__where, (list, tuple))
-                    if data__find__where_is_list:
-                        data__find__where_len = len(data__find__where)
-                        for data__find__where_x, data__find__where_item in enumerate(data__find__where):
-                            if not isinstance(data__find__where_item, (str)):
-                                raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.where[{data__find__where_x}]".format(**locals()) + " must be string", value=data__find__where_item, name="" + (name_prefix or "data") + ".find.where[{data__find__where_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
-                if "exclude" in data__find_keys:
-                    data__find_keys.remove("exclude")
-                    data__find__exclude = data__find["exclude"]
-                    if not isinstance(data__find__exclude, (list, tuple)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.exclude must be array", value=data__find__exclude, name="" + (name_prefix or "data") + ".find.exclude", definition={'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, rule='type')
-                    data__find__exclude_is_list = isinstance(data__find__exclude, (list, tuple))
-                    if data__find__exclude_is_list:
-                        data__find__exclude_len = len(data__find__exclude)
-                        for data__find__exclude_x, data__find__exclude_item in enumerate(data__find__exclude):
-                            if not isinstance(data__find__exclude_item, (str)):
-                                raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.exclude[{data__find__exclude_x}]".format(**locals()) + " must be string", value=data__find__exclude_item, name="" + (name_prefix or "data") + ".find.exclude[{data__find__exclude_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
-                if "include" in data__find_keys:
-                    data__find_keys.remove("include")
-                    data__find__include = data__find["include"]
-                    if not isinstance(data__find__include, (list, tuple)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.include must be array", value=data__find__include, name="" + (name_prefix or "data") + ".find.include", definition={'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, rule='type')
-                    data__find__include_is_list = isinstance(data__find__include, (list, tuple))
-                    if data__find__include_is_list:
-                        data__find__include_len = len(data__find__include)
-                        for data__find__include_x, data__find__include_item in enumerate(data__find__include):
-                            if not isinstance(data__find__include_item, (str)):
-                                raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.include[{data__find__include_x}]".format(**locals()) + " must be string", value=data__find__include_item, name="" + (name_prefix or "data") + ".find.include[{data__find__include_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
-                if "namespaces" in data__find_keys:
-                    data__find_keys.remove("namespaces")
-                    data__find__namespaces = data__find["namespaces"]
-                    if not isinstance(data__find__namespaces, (bool)):
-                        raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.namespaces must be boolean", value=data__find__namespaces, name="" + (name_prefix or "data") + ".find.namespaces", definition={'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}, rule='type')
-                if data__find_keys:
-                    raise JsonSchemaValueException("" + (name_prefix or "data") + ".find must not contain "+str(data__find_keys)+" properties", value=data__find, name="" + (name_prefix or "data") + ".find", definition={'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}, rule='additionalProperties')
-        if data_keys:
-            raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}, rule='additionalProperties')
-    return data
-
-def validate_https___docs_python_org_3_install(data, custom_formats={}, name_prefix=None):
-    if not isinstance(data, (dict)):
-        raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, rule='type')
-    data_is_dict = isinstance(data, dict)
-    if data_is_dict:
-        data_keys = set(data.keys())
-        if "global" in data_keys:
-            data_keys.remove("global")
-            data__global = data["global"]
-            if not isinstance(data__global, (dict)):
-                raise JsonSchemaValueException("" + (name_prefix or "data") + ".global must be object", value=data__global, name="" + (name_prefix or "data") + ".global", definition={'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}, rule='type')
-        for data_key, data_val in data.items():
-            if REGEX_PATTERNS['.+'].search(data_key):
-                if data_key in data_keys:
-                    data_keys.remove(data_key)
-                if not isinstance(data_val, (dict)):
-                    raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be object", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'object'}, rule='type')
-    return data
-
-def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata(data, custom_formats={}, name_prefix=None):
-    if not isinstance(data, (dict)):
-        raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. 
MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_len = len(data) - if not all(prop in data for prop in ['name']): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['name'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='required') - data_keys = set(data.keys()) - if "name" in data_keys: - data_keys.remove("name") - data__name = data["name"] - if not isinstance(data__name, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be string", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, rule='type') - if isinstance(data__name, str): - if not custom_formats["pep508-identifier"](data__name): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be pep508-identifier", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', 'description': 'The name (primary identifier) of the project. 
MUST be statically defined.', 'format': 'pep508-identifier'}, rule='format') - if "version" in data_keys: - data_keys.remove("version") - data__version = data["version"] - if not isinstance(data__version, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".version must be string", value=data__version, name="" + (name_prefix or "data") + ".version", definition={'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, rule='type') - if isinstance(data__version, str): - if not custom_formats["pep440"](data__version): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".version must be pep440", value=data__version, name="" + (name_prefix or "data") + ".version", definition={'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, rule='format') - if "description" in data_keys: - data_keys.remove("description") - data__description = data["description"] - if not isinstance(data__description, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".description must be string", value=data__description, name="" + (name_prefix or "data") + ".description", definition={'type': 'string', '$$description': ['The `summary description of the project', '`_']}, rule='type') - if "readme" in data_keys: - data_keys.remove("readme") - data__readme = data["readme"] - data__readme_one_of_count8 = 0 - if data__readme_one_of_count8 < 2: - try: - if not isinstance(data__readme, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be string", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, rule='type') - data__readme_one_of_count8 += 1 - except JsonSchemaValueException: pass - if data__readme_one_of_count8 < 2: - try: - if not isinstance(data__readme, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be object", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}, rule='type') - data__readme_any_of_count9 = 0 - if not data__readme_any_of_count9: - try: - data__readme_is_dict = isinstance(data__readme, dict) - if data__readme_is_dict: - data__readme_len = len(data__readme) - if not all(prop in data__readme for prop in ['file']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must contain ['file'] properties", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, rule='required') - data__readme_keys = set(data__readme.keys()) - if "file" in data__readme_keys: - data__readme_keys.remove("file") - data__readme__file = data__readme["file"] - if not isinstance(data__readme__file, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.file must be string", value=data__readme__file, name="" + (name_prefix or "data") + ".readme.file", definition={'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}, rule='type') - data__readme_any_of_count9 += 1 - except JsonSchemaValueException: pass - if not data__readme_any_of_count9: - try: - data__readme_is_dict = isinstance(data__readme, dict) - if data__readme_is_dict: - data__readme_len = len(data__readme) - if not all(prop in data__readme for prop in ['text']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must contain ['text'] properties", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}, rule='required') - data__readme_keys = set(data__readme.keys()) - if "text" in data__readme_keys: - data__readme_keys.remove("text") - data__readme__text = data__readme["text"] - if not isinstance(data__readme__text, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.text must be string", value=data__readme__text, name="" + (name_prefix or "data") + ".readme.text", definition={'type': 'string', 'description': 'Full text describing the project.'}, rule='type') - data__readme_any_of_count9 += 1 - except JsonSchemaValueException: pass - if not data__readme_any_of_count9: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme cannot be validated by any definition", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, rule='anyOf') - data__readme_is_dict = isinstance(data__readme, dict) - if data__readme_is_dict: - data__readme_len = len(data__readme) - if not all(prop in data__readme for prop in ['content-type']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must contain ['content-type'] properties", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. 
``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}, rule='required') - data__readme_keys = set(data__readme.keys()) - if "content-type" in data__readme_keys: - data__readme_keys.remove("content-type") - data__readme__contenttype = data__readme["content-type"] - if not isinstance(data__readme__contenttype, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.content-type must be string", value=data__readme__contenttype, name="" + (name_prefix or "data") + ".readme.content-type", definition={'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}, rule='type') - data__readme_one_of_count8 += 1 - except JsonSchemaValueException: pass - if data__readme_one_of_count8 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be valid exactly by one definition" + (" (" + str(data__readme_one_of_count8) + " matches found)"), value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, rule='oneOf') - if "requires-python" in data_keys: - data_keys.remove("requires-python") - data__requirespython = data["requires-python"] - if not isinstance(data__requirespython, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".requires-python must be string", value=data__requirespython, name="" + (name_prefix or "data") + ".requires-python", definition={'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, rule='type') - if isinstance(data__requirespython, str): - if not custom_formats["pep508-versionspec"](data__requirespython): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".requires-python must be pep508-versionspec", value=data__requirespython, name="" + (name_prefix or "data") + ".requires-python", definition={'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, rule='format') - if "license" in data_keys: - data_keys.remove("license") - data__license = data["license"] - data__license_one_of_count10 = 0 - if data__license_one_of_count10 < 2: - try: - data__license_is_dict = isinstance(data__license, dict) - if data__license_is_dict: - data__license_len = len(data__license) - if not all(prop in data__license for prop in ['file']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must contain ['file'] properties", value=data__license, name="" + (name_prefix or "data") + ".license", definition={'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, rule='required') - data__license_keys = set(data__license.keys()) - if "file" in data__license_keys: - data__license_keys.remove("file") - data__license__file = data__license["file"] - if not isinstance(data__license__file, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license.file must be string", value=data__license__file, name="" + (name_prefix or "data") + ".license.file", definition={'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}, rule='type') - data__license_one_of_count10 += 1 - except JsonSchemaValueException: pass - if data__license_one_of_count10 < 2: - try: - data__license_is_dict = isinstance(data__license, dict) - if data__license_is_dict: - data__license_len = len(data__license) - if not all(prop in data__license for prop in ['text']): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must contain ['text'] properties", value=data__license, name="" + (name_prefix or "data") + ".license", definition={'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}, rule='required') - data__license_keys = set(data__license.keys()) - if "text" in data__license_keys: - data__license_keys.remove("text") - data__license__text = data__license["text"] - if not isinstance(data__license__text, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license.text must be string", value=data__license__text, name="" + (name_prefix or "data") + ".license.text", definition={'type': 'string', 
'$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}, rule='type') - data__license_one_of_count10 += 1 - except JsonSchemaValueException: pass - if data__license_one_of_count10 != 1: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must be valid exactly by one definition" + (" (" + str(data__license_one_of_count10) + " matches found)"), value=data__license, name="" + (name_prefix or "data") + ".license", definition={'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, rule='oneOf') - if "authors" in data_keys: - data_keys.remove("authors") - data__authors = data["authors"] - if not isinstance(data__authors, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".authors must be array", value=data__authors, name="" + (name_prefix or "data") + ".authors", definition={'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, rule='type') - data__authors_is_list = isinstance(data__authors, (list, tuple)) - if data__authors_is_list: - data__authors_len = len(data__authors) - for data__authors_x, data__authors_item in enumerate(data__authors): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data__authors_item, custom_formats, (name_prefix or "data") + ".authors[{data__authors_x}]") - if "maintainers" in data_keys: - data_keys.remove("maintainers") - data__maintainers = data["maintainers"] - if not isinstance(data__maintainers, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".maintainers must be array", value=data__maintainers, name="" + (name_prefix or "data") + ".maintainers", definition={'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, rule='type') - data__maintainers_is_list = isinstance(data__maintainers, (list, tuple)) - if data__maintainers_is_list: - data__maintainers_len = len(data__maintainers) - for data__maintainers_x, data__maintainers_item in enumerate(data__maintainers): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data__maintainers_item, custom_formats, (name_prefix or "data") + ".maintainers[{data__maintainers_x}]") - if "keywords" in data_keys: - data_keys.remove("keywords") - data__keywords = data["keywords"] - if not isinstance(data__keywords, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".keywords must be array", value=data__keywords, name="" + (name_prefix or "data") + ".keywords", definition={'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, rule='type') - data__keywords_is_list = isinstance(data__keywords, (list, tuple)) - if data__keywords_is_list: - data__keywords_len = len(data__keywords) - for data__keywords_x, data__keywords_item in enumerate(data__keywords): - if not isinstance(data__keywords_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".keywords[{data__keywords_x}]".format(**locals()) + " must be string", value=data__keywords_item, name="" + (name_prefix or "data") + ".keywords[{data__keywords_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type') - if "classifiers" in data_keys: - data_keys.remove("classifiers") - data__classifiers = data["classifiers"] - if not isinstance(data__classifiers, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers must be array", value=data__classifiers, name="" + (name_prefix or "data") + ".classifiers", definition={'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, rule='type') - data__classifiers_is_list = isinstance(data__classifiers, (list, tuple)) - if data__classifiers_is_list: - data__classifiers_len = len(data__classifiers) - for data__classifiers_x, data__classifiers_item in enumerate(data__classifiers): - if not isinstance(data__classifiers_item, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + " must be string", value=data__classifiers_item, name="" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, rule='type') - if isinstance(data__classifiers_item, str): - if not custom_formats["trove-classifier"](data__classifiers_item): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + " must be trove-classifier", value=data__classifiers_item, name="" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'trove-classifier', 
'description': '`PyPI classifier `_.'}, rule='format') - if "urls" in data_keys: - data_keys.remove("urls") - data__urls = data["urls"] - if not isinstance(data__urls, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls must be object", value=data__urls, name="" + (name_prefix or "data") + ".urls", definition={'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, rule='type') - data__urls_is_dict = isinstance(data__urls, dict) - if data__urls_is_dict: - data__urls_keys = set(data__urls.keys()) - for data__urls_key, data__urls_val in data__urls.items(): - if REGEX_PATTERNS['^.+$'].search(data__urls_key): - if data__urls_key in data__urls_keys: - data__urls_keys.remove(data__urls_key) - if not isinstance(data__urls_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + " must be string", value=data__urls_val, name="" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'url'}, rule='type') - if isinstance(data__urls_val, str): - if not custom_formats["url"](data__urls_val): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + " must be url", value=data__urls_val, name="" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'url'}, rule='format') - if data__urls_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls must not contain "+str(data__urls_keys)+" properties", value=data__urls, name="" + (name_prefix or "data") + ".urls", definition={'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, rule='additionalProperties') - if "scripts" in data_keys: - data_keys.remove("scripts") - data__scripts = data["scripts"] - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__scripts, custom_formats, (name_prefix or "data") + ".scripts") - if "gui-scripts" in data_keys: - data_keys.remove("gui-scripts") - data__guiscripts = data["gui-scripts"] - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__guiscripts, custom_formats, (name_prefix or "data") + ".gui-scripts") - if "entry-points" in data_keys: - data_keys.remove("entry-points") - data__entrypoints = data["entry-points"] - data__entrypoints_is_dict = isinstance(data__entrypoints, dict) - if data__entrypoints_is_dict: - data__entrypoints_keys = set(data__entrypoints.keys()) - for data__entrypoints_key, data__entrypoints_val in data__entrypoints.items(): - if REGEX_PATTERNS['^.+$'].search(data__entrypoints_key): - if data__entrypoints_key in data__entrypoints_keys: - data__entrypoints_keys.remove(data__entrypoints_key) - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__entrypoints_val, custom_formats, (name_prefix or "data") + ".entry-points.{data__entrypoints_key}") - if data__entrypoints_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must not contain "+str(data__entrypoints_keys)+" properties", 
value=data__entrypoints, name="" + (name_prefix or "data") + ".entry-points", definition={'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, rule='additionalProperties') - data__entrypoints_len = len(data__entrypoints) - if data__entrypoints_len != 0: - data__entrypoints_property_names = True - for data__entrypoints_key in data__entrypoints: - try: - if isinstance(data__entrypoints_key, str): - if not custom_formats["python-entrypoint-group"](data__entrypoints_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must be python-entrypoint-group", value=data__entrypoints_key, name="" + (name_prefix or "data") + ".entry-points", definition={'format': 'python-entrypoint-group'}, rule='format') - except JsonSchemaValueException: - data__entrypoints_property_names = False - if not data__entrypoints_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must be named by propertyName definition", value=data__entrypoints, name="" + (name_prefix or "data") + ".entry-points", definition={'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, rule='propertyNames') - if "dependencies" in data_keys: - data_keys.remove("dependencies") - data__dependencies = data["dependencies"] - if not isinstance(data__dependencies, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dependencies must be array", value=data__dependencies, name="" + (name_prefix or "data") + ".dependencies", definition={'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, rule='type') - data__dependencies_is_list = isinstance(data__dependencies, (list, tuple)) - if data__dependencies_is_list: - data__dependencies_len = len(data__dependencies) - for data__dependencies_x, data__dependencies_item in enumerate(data__dependencies): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data__dependencies_item, custom_formats, (name_prefix or "data") + ".dependencies[{data__dependencies_x}]") - if "optional-dependencies" in data_keys: - data_keys.remove("optional-dependencies") - data__optionaldependencies = data["optional-dependencies"] - if not isinstance(data__optionaldependencies, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be object", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='type') - data__optionaldependencies_is_dict = isinstance(data__optionaldependencies, dict) - if data__optionaldependencies_is_dict: - data__optionaldependencies_keys = set(data__optionaldependencies.keys()) - for data__optionaldependencies_key, data__optionaldependencies_val in data__optionaldependencies.items(): - if REGEX_PATTERNS['^.+$'].search(data__optionaldependencies_key): - if data__optionaldependencies_key in data__optionaldependencies_keys: - data__optionaldependencies_keys.remove(data__optionaldependencies_key) - if not isinstance(data__optionaldependencies_val, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}".format(**locals()) + " must be array", value=data__optionaldependencies_val, name="" + (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, rule='type') - data__optionaldependencies_val_is_list = isinstance(data__optionaldependencies_val, (list, tuple)) - if data__optionaldependencies_val_is_list: - data__optionaldependencies_val_len = len(data__optionaldependencies_val) - for data__optionaldependencies_val_x, 
data__optionaldependencies_val_item in enumerate(data__optionaldependencies_val): - validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data__optionaldependencies_val_item, custom_formats, (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}[{data__optionaldependencies_val_x}]") - if data__optionaldependencies_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must not contain "+str(data__optionaldependencies_keys)+" properties", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='additionalProperties') - data__optionaldependencies_len = len(data__optionaldependencies) - if data__optionaldependencies_len != 0: - data__optionaldependencies_property_names = True - for data__optionaldependencies_key in data__optionaldependencies: - try: - if isinstance(data__optionaldependencies_key, str): - if not custom_formats["pep508-identifier"](data__optionaldependencies_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be pep508-identifier", value=data__optionaldependencies_key, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'format': 'pep508-identifier'}, rule='format') - except JsonSchemaValueException: - data__optionaldependencies_property_names = False - if not data__optionaldependencies_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be named by propertyName definition", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='propertyNames') - if "dynamic" in data_keys: - data_keys.remove("dynamic") - data__dynamic = data["dynamic"] - if not isinstance(data__dynamic, (list, tuple)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be array", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}, rule='type') - data__dynamic_is_list = isinstance(data__dynamic, (list, tuple)) - if data__dynamic_is_list: - data__dynamic_len = len(data__dynamic) - for data__dynamic_x, data__dynamic_item in enumerate(data__dynamic): - if data__dynamic_item not in ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 
'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic[{data__dynamic_x}]".format(**locals()) + " must be one of ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']", value=data__dynamic_item, name="" + (name_prefix or "data") + ".dynamic[{data__dynamic_x}]".format(**locals()) + "", definition={'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}, rule='enum') - if data_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since 
``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='additionalProperties') - try: - try: - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_len = len(data) - if not all(prop in data for prop in ['dynamic']): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['dynamic'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, rule='required') - data_keys = set(data.keys()) - if "dynamic" in data_keys: - data_keys.remove("dynamic") - data__dynamic = data["dynamic"] - data__dynamic_is_list = isinstance(data__dynamic, (list, tuple)) - if data__dynamic_is_list: - data__dynamic_contains = False - for data__dynamic_key in data__dynamic: - try: - if data__dynamic_key != "version": - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be same as const definition: version", value=data__dynamic_key, name="" + (name_prefix or "data") + ".dynamic", definition={'const': 'version'}, rule='const') - data__dynamic_contains = True - break - except JsonSchemaValueException: pass - if not data__dynamic_contains: - raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must contain one of contains definition", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}, rule='contains') - except JsonSchemaValueException: pass - else: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must NOT match a disallowed definition", value=data, name="" + (name_prefix or "data") + "", definition={'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, 
'$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, rule='not') - except JsonSchemaValueException: - pass - else: - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_len = len(data) - if not all(prop in data for prop in ['version']): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['version'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, rule='required') - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be string", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}, rule='type') - if isinstance(data, str): - if not custom_formats["pep508"](data): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be pep508", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}, rule='format') - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - for data_key, data_val in data.items(): - if REGEX_PATTERNS['^.+$'].search(data_key): - if data_key in data_keys: - data_keys.remove(data_key) - if not isinstance(data_val, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be string", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}, rule='type') - if isinstance(data_val, str): - if not custom_formats["python-entrypoint-reference"](data_val): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be python-entrypoint-reference", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}, rule='format') - if data_keys: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='additionalProperties') - data_len = len(data) - if data_len != 0: - data_property_names = True - for data_key in data: - try: - if isinstance(data_key, str): - if not custom_formats["python-entrypoint-name"](data_key): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be python-entrypoint-name", value=data_key, name="" + (name_prefix or "data") + "", definition={'format': 'python-entrypoint-name'}, rule='format') - except JsonSchemaValueException: - data_property_names = False - if not data_property_names: - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be named by propertyName definition", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='propertyNames') - return data - -def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data, custom_formats={}, name_prefix=None): - if not isinstance(data, (dict)): - raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://www.python.org/dev/peps/pep-0621/#authors-maintainers', 'type': 'object', 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, rule='type') - data_is_dict = isinstance(data, dict) - if data_is_dict: - data_keys = set(data.keys()) - if "name" in data_keys: - data_keys.remove("name") - data__name = data["name"] - if not isinstance(data__name, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be string", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', '$$description': ['MUST be a valid email name, i.e. 
whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, rule='type') - if "email" in data_keys: - data_keys.remove("email") - data__email = data["email"] - if not isinstance(data__email, (str)): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".email must be string", value=data__email, name="" + (name_prefix or "data") + ".email", definition={'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}, rule='type') - if isinstance(data__email, str): - if not REGEX_PATTERNS["idn-email_re_pattern"].match(data__email): - raise JsonSchemaValueException("" + (name_prefix or "data") + ".email must be idn-email", value=data__email, name="" + (name_prefix or "data") + ".email", definition={'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}, rule='format') - return data \ No newline at end of file diff --git a/spaces/Autodog/nova/Dockerfile b/spaces/Autodog/nova/Dockerfile deleted file mode 100644 index 193895fa20562461afd09099037992c02d62b113..0000000000000000000000000000000000000000 --- a/spaces/Autodog/nova/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM node:18 -RUN git clone https://github.com/supercyx3/ChatGPT-Next-Web-LangChain.git -WORKDIR "ChatGPT-Next-Web-LangChain" - -RUN yarn install && yarn build -# 设置环境变量 -#ENV BASE_URL=https://api.nova-oss.com -EXPOSE 3000 -CMD yarn start \ No newline at end of file diff --git a/spaces/Banbri/zcvzcv/src/lib/loadImage.ts b/spaces/Banbri/zcvzcv/src/lib/loadImage.ts deleted file mode 100644 index d2e7dcb6a548a9ce1937315486954e66e2c54746..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/lib/loadImage.ts +++ /dev/null @@ -1,14 +0,0 @@ -export async function loadImage(image: string): Promise { - const img = new Image(); - img.src = image; - - const imgOnLoad = () => { - return new Promise((resolve, reject) => { - img.onload = () => { resolve(img) }; - img.onerror = (err) => { reject(err) }; - }) - }; - - const loadImg = await imgOnLoad(); - return loadImg -} \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Apklz.md b/spaces/Benson/text-generation/Examples/Apklz.md deleted file mode 100644 index 136d0af73d7f669a239508a256ba604812883b03..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Apklz.md +++ /dev/null @@ -1,70 +0,0 @@ - -

    What is Apklz and how do you use it?

    -

    If you are an Android user, you may have come across the term "apklz" or seen files with the .apklz extension. But what exactly is apklz, and how can you use it on your device? In this article, we explain everything you need to know about apklz files, including their features, benefits, risks, and precautions. By the end, you will be able to download, install, update, uninstall, and manage apklz files like a pro.

    -

    Introduction

    -

    What is apklz and what does it stand for?

    -

    Apklz is a file format whose name stands for Android Package Lempel-Ziv. It is a compressed version of the standard Android Package (APK) format, which is used to distribute and install applications on Android devices. Apklz files are produced with a lossless Lempel-Ziv (LZ) compression algorithm, which shrinks the file size without affecting the quality or functionality of the app; a small decompression sketch follows below.

    -
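    To make the compression idea concrete, here is a minimal Python sketch of what unpacking such a file could look like. It assumes, purely for illustration, that an .apklz file is an APK compressed with an LZ-family codec such as LZMA; the helper name unpack_apklz and the file example.apklz are hypothetical, not part of any documented tool.

```python
import lzma
from pathlib import Path

def unpack_apklz(src: str, dst: str = "") -> Path:
    """Decompress a hypothetical LZ-compressed APK back into a plain .apk."""
    src_path = Path(src)
    dst_path = Path(dst) if dst else src_path.with_suffix(".apk")
    # lzma implements LZMA, a Lempel-Ziv-family lossless codec, so the
    # recovered bytes are identical to the original APK archive.
    with lzma.open(src_path, "rb") as fin, open(dst_path, "wb") as fout:
        fout.write(fin.read())
    return dst_path

if __name__ == "__main__":
    print(unpack_apklz("example.apklz"))  # hypothetical input file
```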

    apklz


    Download File https://bltlly.com/2v6Lys



    -

    Why do people use apklz files, and what are the benefits?

    -

    People use apklz files for several reasons, such as:

    -
      -
    • To save storage space on their devices. Apklz files are usually smaller than APK files, so they take up less room in the device's memory.
    • To download apps faster. Apklz files download more quickly than APK files, especially over a slow or limited internet connection.
    • To access apps that are not available on the official Google Play Store. Apklz files let you install apps that are restricted or removed from the Play Store for reasons such as regional limitations, legal issues, or policy violations.
    • To try new or modified versions of apps. Apklz files let you test beta releases, modded builds, or custom versions of apps that offer extra features or improvements not found in the original releases.

    How do you download and install apklz files on your device?
    - -

    To download and install apklz files on your Android device, follow these steps; a command-line alternative is sketched right after the list:

    -
      -
    1. Find a reliable source for apklz files. You can use websites such as Apk Plz, Google Play, or Scamvoid to search for and download the apklz files of your choice. Be sure to check other users' ratings, reviews, and comments before downloading any file.
    2. Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This allows you to install apps from sources other than the Play Store.
    3. Locate the downloaded apklz file on your device. You can use a file manager app such as ES File Explorer or File Manager to find the file in your downloads folder or wherever you saved it.
    4. Tap the file and follow the on-screen instructions to install it. You may need to grant some permissions or accept some terms and conditions before the installation completes.
    -
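    If you prefer to sideload from a computer instead of tapping through the file on the phone, the install step can also be driven over adb. This is a hedged sketch, not part of the article's instructions: it assumes the apklz has already been decompressed to a plain .apk (see the earlier sketch), that adb is installed, and that USB debugging is enabled on the device.

```python
import subprocess

def sideload_apk(apk_path: str) -> None:
    """Install a plain .apk onto a USB-connected Android device via adb."""
    # `adb install -r` replaces an existing install while keeping its data.
    result = subprocess.run(
        ["adb", "install", "-r", apk_path],
        capture_output=True, text=True, check=False,
    )
    if result.returncode != 0:
        raise RuntimeError(f"adb install failed: {result.stderr.strip()}")
    print(result.stdout.strip())

sideload_apk("example.apk")  # hypothetical file from the previous sketch
```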

    Congratulations, you have successfully installed an apklz file on your device. You can now launch and use the app as you normally would.

    -

    Features of Apklz

    -

    What are some of the features that set apklz files apart from other file formats?

    -

    Apklz files have some unique features that distinguish them from other file formats, such as:

    -
      -
    • They are self-contained and executable. Apklz files include all the components and resources needed to run an app, such as code, images, sounds, and fonts. They do not require additional files or libraries to work.
    • They are compatible and portable. Apklz files can run on any Android device that meets the minimum API level and hardware specifications required by the app. They do not depend on the device's manufacturer, model, or operating system version.
    -

    How do you access and manage apklz files on your device?

    -

    To access and manage apklz files on your device, you need a file manager app that supports the .apklz extension. Some popular file manager apps that can handle apklz files are:

    | App name | Description | Download link |
    | --- | --- | --- |
    | ES File Explorer | A powerful and versatile file manager that can access and manage all kinds of files on your device, including apklz files. It also has a built-in app manager that can install, uninstall, back up, and restore apklz files. | ES File Explorer |
    | File Manager | A simple, easy-to-use file manager that handles apklz files with ease. It also has a dedicated apklz section where you can view file details, install files, or delete them. | File Manager |
    | X-plore File Manager | A dual-pane file manager that can access and manage apklz files on your device. It also has a built-in app manager that can install, uninstall, back up, and restore apklz files. | X-plore File Manager |

    How do you update and uninstall apklz files?

    -

    To update and uninstall apklz files on your device, follow these steps:

    -

    -
      -
    1. To update an apklz file, download the latest version from a reliable source and install it over the existing version, using the same steps described above. Alternatively, use a file manager app with a built-in app manager to check for updates and install them automatically (see the sketch after this list).
    2. To uninstall an apklz file, go to Settings > Apps > ApkLZ (or the name of the app) and tap Uninstall. You can also use a file manager app with a built-in app manager to uninstall apklz files easily.
    - -
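    For the update check in step 1, one way to compare versions from a computer is to read the installed versionName over adb. A minimal sketch: the package id com.example.app is a placeholder, and it assumes adb is available and a device is connected.

```python
import re
import subprocess

def installed_version(package: str) -> str:
    """Return the versionName reported by `adb shell dumpsys package`."""
    out = subprocess.run(
        ["adb", "shell", "dumpsys", "package", package],
        capture_output=True, text=True, check=True,
    ).stdout
    match = re.search(r"versionName=(\S+)", out)
    return match.group(1) if match else ""

print(installed_version("com.example.app"))  # placeholder package id
```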

    What are some of the risks and challenges of using apklz files?

    -

    While apklz files have many advantages, they also come with some risks and challenges you should keep in mind, such as:

    -
      -
    • They may not be safe. Apklz files are not verified or approved by Google or any other authority. They may contain malware, viruses, spyware, or other harmful elements that can damage your device or compromise your privacy. They may also have bugs, errors, or compatibility issues that affect the performance or functionality of your device or app.
    • They may not be legal or ethical. Apklz files may violate the intellectual property rights or terms of service of the apps' original developers or publishers. They may also contain pirated, cracked, hacked, or modified content that is illegal or unethical to use.
    • They may not be updated or supported. Apklz files may not receive official updates or developer support, so consider a backup tool such as Titanium Backup to back up and restore your data in case of problems with your apklz files.
    -

    Conclusion

    -

    Apklz files are a convenient and efficient way to download and install apps on your Android device. They offer many benefits, such as saving storage space, downloading faster, accessing unavailable apps, and trying new or modified versions of apps. However, they also come with risks and challenges, such as being unsafe, illegal, or unsupported. Therefore, be careful and cautious when using apklz files. Download them only from reliable sources, scan them with antivirus software, read their permissions and terms of service, and back up your data regularly. By following these tips and precautions, you can enjoy using apklz files without any worries.

    - -

    Frequently asked questions

    -

    What is the difference between apklz and apk?

    -

    Apklz and apk are both file formats used to distribute and install apps on Android devices. The main difference is that apklz files are compressed versions of apk files, which means they have smaller file sizes and faster download speeds. However, they also have some drawbacks, such as being less safe, less legal, and less compatible than apk files.

    -

    How can I open an apklz file on my PC or Mac?

    -

    To open an apklz file on your PC or Mac, you need software that can extract or decompress the file, such as 7-Zip, WinRAR, or PeaZip. Once you extract the file, you get an apk file that you can open with an Android emulator such as BlueStacks or NoxPlayer, or process with tools such as ApkLZ Creator, ApkLZ Maker, or an online ApkLZ compressor or generator.

    -

    How can I edit or modify an apklz file?

    -

    To edit or modify an apklz file, you need software designed for the job. Some programs that can edit or modify apklz files are APK Editor, APK Tool, or an online ApkLZ editor or modifier.

    -

    How can I check whether an apklz file is safe or not?

    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cazador Asesino Mod Apk Ilimitado Todo.md b/spaces/Benson/text-generation/Examples/Cazador Asesino Mod Apk Ilimitado Todo.md deleted file mode 100644 index ccf75ff3ff53a04afead3841276ce5731692f479..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cazador Asesino Mod Apk Ilimitado Todo.md +++ /dev/null @@ -1,81 +0,0 @@ - -

    Hunter Assassin Mod APK: A stealthy and strategic game for Android users

    -

    If you are looking for a fun and challenging mobile game that tests your stealth and strategy skills, you might want to try Hunter Assassin. This popular game has more than 100 million downloads on the Google Play Store. But what if you want to enjoy it with unlimited resources and features? That is where Hunter Assassin Mod APK comes in. In this article, we tell you everything you need to know about Hunter Assassin and its modded version.

    -

    hunter assassin mod apk unlimited everything


    Download File ---> https://bltlly.com/2v6Khv



    -

    What is Hunter Assassin?

    -

    Hunter Assassin is a mobile game developed by Ruby Game Studio. It is a fast-paced game in which you control an assassin armed with a deadly knife. Your mission is to eliminate all the targets on each level without being detected by the guards. You have to use your speed, agility, and cunning to surprise your enemies and take them down one by one.

    -

    The gameplay of Hunter Assassin

    -

    The gameplay of Hunter Assassin is simple but addictive. You tap the screen to move your assassin and swipe to change direction. You have to avoid the guards' flashlight beams and hide behind walls, crates, or barrels. You reach your target and tap them to make the kill. Along the way you can also collect gems and keys, which can be used to unlock new assassins with different skills and abilities.

    -

    The features of Hunter Assassin

    -

    Hunter Assassin has many features that make it an enjoyable game for players of all ages. Some of these features are:

    -
      -
    • More than 500 levels of increasing difficulty and variety.
    • Different types of guards with different behaviors and weapons.
    • Different types of assassins with different stats and appearances.
    • Simple, intuitive controls that are easy to learn.
    • Smooth, colorful graphics that create a vivid atmosphere.
    -

    What is Hunter Assassin Mod APK?

    -

    Hunter Assassin Mod APK is a modified version of the original game that gives you access to unlimited resources and features. With this modded version, you can enjoy the game without any limits or restrictions. You can play with unlimited money, gems, keys, and diamonds. You can also unlock all assassins and levels without spending real money, and remove the ads that might interrupt your gaming experience.

    -

    -

    The benefits of Hunter Assassin Mod APK

    -

    Hunter Assassin Mod APK has many benefits that make it a better choice than the original game for some players. Some of these benefits are:

    -
      -
    • You can play without worrying about running out of resources or waiting for them to regenerate.
    • You can unlock all assassins and levels without completing any tasks or achievements.
    • You can customize your assassin's appearance and abilities to your liking.
    • You can enjoy the game without ads or pop-ups that might distract you or slow down your device.
    • You can have more fun and challenge yourself with the unlimited resources and features.
    -

    The drawbacks of Hunter Assassin Mod APK

    -

    Hunter Assassin Mod APK also has some drawbacks you should be aware of before downloading and installing it. Some of these drawbacks are:

    -
      -
    • You may run into compatibility issues or bugs on some devices or operating systems.
    • You may lose your progress or data if you uninstall the modded version or update the original game.
    • You may be banned or suspended from the game if you use the modded version online or in multiplayer mode.
    • You may miss out on updates or new features that the developers add to the original game.
    • You may lose the thrill and satisfaction of playing the game fair and square.
    - -

    How do you download and install Hunter Assassin Mod APK?

    If you want to download and install Hunter Assassin Mod APK, you have to follow some simple steps and precautions. Here is a guide on how to do it:

    -

    The steps to download and install Hunter Assassin Mod APK

    -
      -
    1. Go to a reliable and trustworthy website that offers the modded version of the game. You can search for it on Google or use the link below.
    2. Download the APK file of the modded version. Make sure you have enough storage space on your device.
    3. Go to your device settings and enable the option to install apps from unknown sources. This lets you install the modded version without any problems.
    4. Locate the downloaded APK file on your device and tap it to start the installation process.
    5. Follow the on-screen instructions and wait for the installation to finish.
    6. Launch the game and enjoy the unlimited resources and features.
    -

    The precautions to take before downloading and installing Hunter Assassin Mod APK

    -

    Before downloading and installing Hunter Assassin Mod APK, you should take some precautions to avoid problems or risks. Here are some of them:

    -
      -
    • Make sure you have a stable internet connection and good antivirus software on your device.
    • Make sure you download the modded version from a reliable and trustworthy website. Avoid any suspicious or malicious links that might harm your device or data.
    • Make sure you back up your data and progress from the original game. You can use a cloud service or an external storage device for this purpose.
    • Make sure you uninstall the original game before installing the modded version. This prevents conflicts or errors between the two versions.
    • Make sure you use the modded version offline or in single-player mode. Do not use it online or in multiplayer mode, as this may get you banned or suspended from the game.
    -

    Conclusion

    - -

    Hunter Assassin Mod APK is a modified version of the original game that gives you access to unlimited resources and features. You can play with unlimited money, gems, keys, and diamonds, unlock all assassins and levels without spending real money, and remove the ads that might interrupt your gaming experience.

    -

    If you want to download and install Hunter Assassin Mod APK, you have to follow some simple steps and precautions. You have to go to a reliable and trustworthy website that offers the modded version of the game, download the APK file, and enable the option to install apps from unknown sources on your device. You then locate the downloaded APK file, tap it to start the installation, and finally launch the game and enjoy the unlimited resources and features.

    -

    However, you should also be aware of some drawbacks of Hunter Assassin Mod APK. You may run into compatibility issues or bugs on some devices or operating systems. You may lose your progress or data if you uninstall the modded version or update the original game. You may be banned or suspended from the game if you use the modded version online or in multiplayer mode. You may miss out on updates or new features that the developers add to the original game. And you may lose the thrill and satisfaction of playing the game fair and square.

    -

    Therefore, you should weigh the pros and cons of Hunter Assassin Mod APK before deciding to download and install it. You should also follow the steps and precautions carefully to avoid problems or risks, respect the rules and policies of the game, and play responsibly.

    -

    Frequently asked questions

    -

    Here are some frequently asked questions about Hunter Assassin and Hunter Assassin Mod APK:

    -
      -
    1. What is the latest version of Hunter Assassin Mod APK?

    2. Is it safe to download and install Hunter Assassin Mod APK?

      Hunter Assassin Mod APK is safe to download and install if you get it from a reliable and trustworthy website. However, you should always scan the APK file with good antivirus software before installing it, and back up your data and progress from the original game before installing the modded version.

      -
    3. Can I play Hunter Assassin Mod APK offline?

      Yes, you can play Hunter Assassin Mod APK without an internet connection. However, you may not be able to access some features or updates that require an online connection.

      -
    4. Can I play Hunter Assassin Mod APK with my friends?

      No, you cannot play Hunter Assassin Mod APK with your friends, as it does not have a multiplayer mode. You can only play it in single-player mode. If you want to play with your friends, you should use the original game.

      -
    5. How can I contact the developers of Hunter Assassin?

      You can contact the developers of Hunter Assassin by emailing support@rubygamestudio.com. You can also visit their website at https://www.rubygamestudio.com/ or follow them on Facebook at https://www.facebook.com/rubygamestudio.

      -

    -
    -
    \ No newline at end of file diff --git a/spaces/BetterAPI/BetterChat_new/src/lib/types/SharedConversation.ts b/spaces/BetterAPI/BetterChat_new/src/lib/types/SharedConversation.ts deleted file mode 100644 index 8b44254621c1292e331e874aaeea7f21340bd041..0000000000000000000000000000000000000000 --- a/spaces/BetterAPI/BetterChat_new/src/lib/types/SharedConversation.ts +++ /dev/null @@ -1,11 +0,0 @@ -import type { Message } from "./Message"; -import type { Timestamps } from "./Timestamps"; - -export interface SharedConversation extends Timestamps { - _id: string; - - hash: string; - - title: string; - messages: Message[]; -} diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/cache.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/cache.py deleted file mode 100644 index e96d2b4924c468c666f3ad6dab902f217ee43c39..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/cache.py +++ /dev/null @@ -1,222 +0,0 @@ -import os -import textwrap -from optparse import Values -from typing import Any, List - -import pip._internal.utils.filesystem as filesystem -from pip._internal.cli.base_command import Command -from pip._internal.cli.status_codes import ERROR, SUCCESS -from pip._internal.exceptions import CommandError, PipError -from pip._internal.utils.logging import getLogger - -logger = getLogger(__name__) - - -class CacheCommand(Command): - """ - Inspect and manage pip's wheel cache. - - Subcommands: - - - dir: Show the cache directory. - - info: Show information about the cache. - - list: List filenames of packages stored in the cache. - - remove: Remove one or more package from the cache. - - purge: Remove all items from the cache. - - ```` can be a glob expression or a package name. - """ - - ignore_require_venv = True - usage = """ - %prog dir - %prog info - %prog list [] [--format=[human, abspath]] - %prog remove - %prog purge - """ - - def add_options(self) -> None: - self.cmd_opts.add_option( - "--format", - action="store", - dest="list_format", - default="human", - choices=("human", "abspath"), - help="Select the output format among: human (default) or abspath", - ) - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options: Values, args: List[str]) -> int: - handlers = { - "dir": self.get_cache_dir, - "info": self.get_cache_info, - "list": self.list_cache_items, - "remove": self.remove_cache_items, - "purge": self.purge_cache, - } - - if not options.cache_dir: - logger.error("pip cache commands can not function since cache is disabled.") - return ERROR - - # Determine action - if not args or args[0] not in handlers: - logger.error( - "Need an action (%s) to perform.", - ", ".join(sorted(handlers)), - ) - return ERROR - - action = args[0] - - # Error handling happens here, not in the action-handlers. 
- try: - handlers[action](options, args[1:]) - except PipError as e: - logger.error(e.args[0]) - return ERROR - - return SUCCESS - - def get_cache_dir(self, options: Values, args: List[Any]) -> None: - if args: - raise CommandError("Too many arguments") - - logger.info(options.cache_dir) - - def get_cache_info(self, options: Values, args: List[Any]) -> None: - if args: - raise CommandError("Too many arguments") - - num_http_files = len(self._find_http_files(options)) - num_packages = len(self._find_wheels(options, "*")) - - http_cache_location = self._cache_dir(options, "http") - wheels_cache_location = self._cache_dir(options, "wheels") - http_cache_size = filesystem.format_directory_size(http_cache_location) - wheels_cache_size = filesystem.format_directory_size(wheels_cache_location) - - message = ( - textwrap.dedent( - """ - Package index page cache location: {http_cache_location} - Package index page cache size: {http_cache_size} - Number of HTTP files: {num_http_files} - Locally built wheels location: {wheels_cache_location} - Locally built wheels size: {wheels_cache_size} - Number of locally built wheels: {package_count} - """ - ) - .format( - http_cache_location=http_cache_location, - http_cache_size=http_cache_size, - num_http_files=num_http_files, - wheels_cache_location=wheels_cache_location, - package_count=num_packages, - wheels_cache_size=wheels_cache_size, - ) - .strip() - ) - - logger.info(message) - - def list_cache_items(self, options: Values, args: List[Any]) -> None: - if len(args) > 1: - raise CommandError("Too many arguments") - - if args: - pattern = args[0] - else: - pattern = "*" - - files = self._find_wheels(options, pattern) - if options.list_format == "human": - self.format_for_human(files) - else: - self.format_for_abspath(files) - - def format_for_human(self, files: List[str]) -> None: - if not files: - logger.info("No locally built wheels cached.") - return - - results = [] - for filename in files: - wheel = os.path.basename(filename) - size = filesystem.format_file_size(filename) - results.append(f" - {wheel} ({size})") - logger.info("Cache contents:\n") - logger.info("\n".join(sorted(results))) - - def format_for_abspath(self, files: List[str]) -> None: - if not files: - return - - results = [] - for filename in files: - results.append(filename) - - logger.info("\n".join(sorted(results))) - - def remove_cache_items(self, options: Values, args: List[Any]) -> None: - if len(args) > 1: - raise CommandError("Too many arguments") - - if not args: - raise CommandError("Please provide a pattern") - - files = self._find_wheels(options, args[0]) - - no_matching_msg = "No matching packages" - if args[0] == "*": - # Only fetch http files if no specific pattern given - files += self._find_http_files(options) - else: - # Add the pattern to the log message - no_matching_msg += ' for pattern "{}"'.format(args[0]) - - if not files: - logger.warning(no_matching_msg) - - for filename in files: - os.unlink(filename) - logger.verbose("Removed %s", filename) - logger.info("Files removed: %s", len(files)) - - def purge_cache(self, options: Values, args: List[Any]) -> None: - if args: - raise CommandError("Too many arguments") - - return self.remove_cache_items(options, ["*"]) - - def _cache_dir(self, options: Values, subdir: str) -> str: - return os.path.join(options.cache_dir, subdir) - - def _find_http_files(self, options: Values) -> List[str]: - http_dir = self._cache_dir(options, "http") - return filesystem.find_files(http_dir, "*") - - def _find_wheels(self, options: 
Values, pattern: str) -> List[str]: - wheel_dir = self._cache_dir(options, "wheels") - - # The wheel filename format, as specified in PEP 427, is: - # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl - # - # Additionally, non-alphanumeric values in the distribution are - # normalized to underscores (_), meaning hyphens can never occur - # before `-{version}`. - # - # Given that information: - # - If the pattern we're given contains a hyphen (-), the user is - # providing at least the version. Thus, we can just append `*.whl` - # to match the rest of it. - # - If the pattern we're given doesn't contain a hyphen (-), the - # user is only providing the name. Thus, we append `-*.whl` to - # match the hyphen before the version, followed by anything else. - # - # PEP 427: https://www.python.org/dev/peps/pep-0427/ - pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl") - - return filesystem.find_files(wheel_dir, pattern) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_boxes.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_boxes.py deleted file mode 100644 index d94c91d742036c57752a6fcb95a924488ad20312..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_boxes.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import json -import math -import numpy as np -import unittest -import torch - -from detectron2.structures import Boxes, BoxMode, pairwise_iou - - -class TestBoxMode(unittest.TestCase): - def _convert_xy_to_wh(self, x): - return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) - - def _convert_xywha_to_xyxy(self, x): - return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS) - - def _convert_xywh_to_xywha(self, x): - return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS) - - def test_box_convert_list(self): - for tp in [list, tuple]: - box = tp([5.0, 5.0, 10.0, 10.0]) - output = self._convert_xy_to_wh(box) - self.assertIsInstance(output, tp) - self.assertIsInstance(output[0], float) - self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0])) - - with self.assertRaises(Exception): - self._convert_xy_to_wh([box]) - - def test_box_convert_array(self): - box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]]) - output = self._convert_xy_to_wh(box) - self.assertEqual(output.dtype, box.dtype) - self.assertEqual(output.shape, box.shape) - self.assertTrue((output[0] == [5, 5, 5, 5]).all()) - self.assertTrue((output[1] == [1, 1, 1, 2]).all()) - - def test_box_convert_cpu_tensor(self): - box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) - output = self._convert_xy_to_wh(box) - self.assertEqual(output.dtype, box.dtype) - self.assertEqual(output.shape, box.shape) - output = output.numpy() - self.assertTrue((output[0] == [5, 5, 5, 5]).all()) - self.assertTrue((output[1] == [1, 1, 1, 2]).all()) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_box_convert_cuda_tensor(self): - box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda() - output = self._convert_xy_to_wh(box) - self.assertEqual(output.dtype, box.dtype) - self.assertEqual(output.shape, box.shape) - self.assertEqual(output.device, box.device) - output = output.cpu().numpy() - self.assertTrue((output[0] == [5, 5, 5, 5]).all()) - self.assertTrue((output[1] == [1, 1, 1, 2]).all()) - - def test_box_convert_xywha_to_xyxy_list(self): - for tp in [list, tuple]: - box = tp([50, 50, 30, 20, 0]) - output = 
self._convert_xywha_to_xyxy(box) - self.assertIsInstance(output, tp) - self.assertEqual(output, tp([35, 40, 65, 60])) - - with self.assertRaises(Exception): - self._convert_xywha_to_xyxy([box]) - - def test_box_convert_xywha_to_xyxy_array(self): - for dtype in [np.float64, np.float32]: - box = np.asarray( - [ - [50, 50, 30, 20, 0], - [50, 50, 30, 20, 90], - [1, 1, math.sqrt(2), math.sqrt(2), -45], - ], - dtype=dtype, - ) - output = self._convert_xywha_to_xyxy(box) - self.assertEqual(output.dtype, box.dtype) - expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype) - self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output)) - - def test_box_convert_xywha_to_xyxy_tensor(self): - for dtype in [torch.float32, torch.float64]: - box = torch.tensor( - [ - [50, 50, 30, 20, 0], - [50, 50, 30, 20, 90], - [1, 1, math.sqrt(2), math.sqrt(2), -45], - ], - dtype=dtype, - ) - output = self._convert_xywha_to_xyxy(box) - self.assertEqual(output.dtype, box.dtype) - expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype) - - self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output)) - - def test_box_convert_xywh_to_xywha_list(self): - for tp in [list, tuple]: - box = tp([50, 50, 30, 20]) - output = self._convert_xywh_to_xywha(box) - self.assertIsInstance(output, tp) - self.assertEqual(output, tp([65, 60, 30, 20, 0])) - - with self.assertRaises(Exception): - self._convert_xywh_to_xywha([box]) - - def test_box_convert_xywh_to_xywha_array(self): - for dtype in [np.float64, np.float32]: - box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype) - output = self._convert_xywh_to_xywha(box) - self.assertEqual(output.dtype, box.dtype) - expected = np.asarray( - [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype - ) - self.assertTrue(np.allclose(output, expected, atol=1e-6), "output={}".format(output)) - - def test_box_convert_xywh_to_xywha_tensor(self): - for dtype in [torch.float32, torch.float64]: - box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [-1, -1, 2, 2]], dtype=dtype) - output = self._convert_xywh_to_xywha(box) - self.assertEqual(output.dtype, box.dtype) - expected = torch.tensor( - [[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype - ) - - self.assertTrue(torch.allclose(output, expected, atol=1e-6), "output={}".format(output)) - - def test_json_serializable(self): - payload = {"box_mode": BoxMode.XYWH_REL} - try: - json.dumps(payload) - except Exception: - self.fail("JSON serialization failed") - - def test_json_deserializable(self): - payload = '{"box_mode": 2}' - obj = json.loads(payload) - try: - obj["box_mode"] = BoxMode(obj["box_mode"]) - except Exception: - self.fail("JSON deserialization failed") - - -class TestBoxIOU(unittest.TestCase): - def test_pairwise_iou(self): - boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) - - boxes2 = torch.tensor( - [ - [0.0, 0.0, 1.0, 1.0], - [0.0, 0.0, 0.5, 1.0], - [0.0, 0.0, 1.0, 0.5], - [0.0, 0.0, 0.5, 0.5], - [0.5, 0.5, 1.0, 1.0], - [0.5, 0.5, 1.5, 1.5], - ] - ) - - expected_ious = torch.tensor( - [ - [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], - [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], - ] - ) - - ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2)) - - self.assertTrue(torch.allclose(ious, expected_ious)) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/ChevyWithAI/rvc-aicover/config.py 
b/spaces/ChevyWithAI/rvc-aicover/config.py deleted file mode 100644 index c0c16e0017efbcaf250cb539a1d0edb4e83575e4..0000000000000000000000000000000000000000 --- a/spaces/ChevyWithAI/rvc-aicover/config.py +++ /dev/null @@ -1,88 +0,0 @@ -########################硬件参数######################## - -# 填写cuda:x, cpu 或 mps, x指代第几张卡,只支持 N卡 / Apple Silicon 加速 -device = "cuda:0" - -# 9-10-20-30-40系显卡无脑True,不影响质量,>=20显卡开启有加速 -is_half = True - -# 默认0用上所有线程,写数字限制CPU资源使用 -n_cpu = 0 - -########################硬件参数######################## - - -##################下为参数处理逻辑,勿动################## - -########################命令行参数######################## -import argparse - -parser = argparse.ArgumentParser() -parser.add_argument("--port", type=int, default=7865, help="Listen port") -parser.add_argument("--pycmd", type=str, default="python", help="Python command") -parser.add_argument("--colab", action="store_true", help="Launch in colab") -parser.add_argument( - "--noparallel", action="store_true", help="Disable parallel processing" -) -parser.add_argument( - "--noautoopen", action="store_true", help="Do not open in browser automatically" -) -cmd_opts, unknown = parser.parse_known_args() - -python_cmd = cmd_opts.pycmd -listen_port = cmd_opts.port -iscolab = cmd_opts.colab -noparallel = cmd_opts.noparallel -noautoopen = cmd_opts.noautoopen -########################命令行参数######################## - -import sys -import torch - - -# has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. -# check `getattr` and try it for compatibility -def has_mps() -> bool: - if sys.platform != "darwin": - return False - else: - if not getattr(torch, "has_mps", False): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - -if not torch.cuda.is_available(): - if has_mps(): - print("没有发现支持的N卡, 使用MPS进行推理") - device = "mps" - else: - print("没有发现支持的N卡, 使用CPU进行推理") - device = "cpu" - is_half = False - -if device not in ["cpu", "mps"]: - gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1])) - if "16" in gpu_name or "MX" in gpu_name: - print("16系显卡/MX系显卡强制单精度") - is_half = False - -from multiprocessing import cpu_count - -if n_cpu == 0: - n_cpu = cpu_count() -if is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 -else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/maimai_join/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/maimai_join/__init__.py deleted file mode 100644 index b923e7fb5dbc944a0560aa89ccf8ce215d5b8c58..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/maimai_join/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from pathlib import Path -from typing import List - -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.utils import make_jpg_or_gif - -img_dir = Path(__file__).parent / "images" - - -def maimai_join(images: List[BuildImage], texts, args): - frame = BuildImage.open(img_dir / "0.png") - - def make(img: BuildImage) -> BuildImage: - img = img.convert("RGBA").square().resize((400, 400)) - return frame.copy().paste(img, (50, 50), alpha=True, below=True) - - return make_jpg_or_gif(images[0], make) - - -add_meme("maimai_join", maimai_join, min_images=1, max_images=1, keywords=["旅行伙伴加入"]) diff --git a/spaces/CjangCjengh/Sanskrit-TTS/attentions.py b/spaces/CjangCjengh/Sanskrit-TTS/attentions.py deleted file mode 100644 index 
86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/CjangCjengh/Sanskrit-TTS/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - 
- y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
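`attention()` above applies masks additively: disallowed positions are filled with -1e4 (rather than -inf, which would overflow in FP16) so that softmax drives their weights to approximately zero. A small demonstration of the same convention:

```python
# Masked softmax as used in MultiHeadAttention.attention above: positions
# where mask == 0 receive a large negative score before normalization.
import torch
import torch.nn.functional as F

scores = torch.randn(1, 1, 3, 3)                    # [b, n_h, t_t, t_s]
mask = torch.tensor([[1, 1, 0],
                     [1, 1, 0],
                     [1, 1, 1]]).view(1, 1, 3, 3)
scores = scores.masked_fill(mask == 0, -1e4)
p_attn = F.softmax(scores, dim=-1)
print(p_attn[0, 0])  # rows sum to 1; masked entries are ~0
```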
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
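The `_relative_position_to_absolute_position` helper above converts relative-position logits to absolute indexing purely with padding and reshaping, avoiding gather ops. A shape check of the same trick, using plain `F.pad` tuples in place of `commons.convert_pad_shape`:

```python
# Verifies the pad-and-reshape trick: relative logits [b, h, l, 2*l-1]
# become absolute ones [b, h, l, l].
import torch
import torch.nn.functional as F

b, h, l = 2, 4, 5
x = torch.randn(b, h, l, 2 * l - 1)
x = F.pad(x, (0, 1))                       # append one column of zeros
x_flat = x.view(b, h, l * 2 * l)
x_flat = F.pad(x_flat, (0, l - 1))         # pad the flattened tail
x_final = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(x_final.shape)                       # torch.Size([2, 4, 5, 5])
```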
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fpn.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fpn.py deleted file mode 100644 index abd171776d8e10f4ac657303f5d1bfad624569dd..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/fpn.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch -import torch.nn.functional as F -from torch import nn - - -class FPN(nn.Module): - """ - Module that adds FPN on top of a list of feature maps. - The feature maps are currently supposed to be in increasing depth order, and must be consecutive - """ - - def __init__( - self, in_channels_list, out_channels, conv_block, top_blocks=None - ): - """ - Arguments: - in_channels_list (list[int]): number of channels for each feature map that - will be fed - out_channels (int): number of channels of the FPN representation - top_blocks (nn.Module or None): if provided, an extra operation will - be performed on the output of the last (smallest resolution) - FPN output, and the result will extend the result list - """ - super(FPN, self).__init__() - self.inner_blocks = [] - self.layer_blocks = [] - for idx, in_channels in enumerate(in_channels_list, 1): - inner_block = "fpn_inner{}".format(idx) - layer_block = "fpn_layer{}".format(idx) - - if in_channels == 0: - continue - inner_block_module = conv_block(in_channels, out_channels, 1) - layer_block_module = conv_block(out_channels, out_channels, 3, 1) - self.add_module(inner_block, inner_block_module) - self.add_module(layer_block, layer_block_module) - self.inner_blocks.append(inner_block) - self.layer_blocks.append(layer_block) - self.top_blocks = top_blocks - - def forward(self, x): - """ - Arguments: - x (list[Tensor]): feature maps for each feature level. 
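The FFN above chooses between two padding schemes: `_same_padding` splits `kernel_size - 1` across both sides, while `_causal_padding` places all of it on the left so an output frame never depends on future frames. Both keep the time dimension unchanged after the convolution:

```python
# The two FFN padding modes in isolation; both preserve sequence length.
import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 10)                  # [batch, channels, time]
k = 3
conv = torch.nn.Conv1d(8, 8, k)
same = conv(F.pad(x, ((k - 1) // 2, k // 2)))
causal = conv(F.pad(x, (k - 1, 0)))        # all padding on the left
print(same.shape, causal.shape)            # both torch.Size([1, 8, 10])
```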
- Returns: - results (tuple[Tensor]): feature maps after FPN layers. - They are ordered from highest resolution first. - """ - last_inner = getattr(self, self.inner_blocks[-1])(x[-1]) - results = [] - results.append(getattr(self, self.layer_blocks[-1])(last_inner)) - for feature, inner_block, layer_block in zip( - x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1] - ): - if not inner_block: - continue - inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest") - inner_lateral = getattr(self, inner_block)(feature) - # TODO use size instead of scale to make it robust to different sizes - # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:], - # mode='bilinear', align_corners=False) - last_inner = inner_lateral + inner_top_down - results.insert(0, getattr(self, layer_block)(last_inner)) - - if isinstance(self.top_blocks, LastLevelP6P7): - last_results = self.top_blocks(x[-1], results[-1]) - results.extend(last_results) - elif isinstance(self.top_blocks, LastLevelMaxPool): - last_results = self.top_blocks(results[-1]) - results.extend(last_results) - - return tuple(results) - - -class LastLevelMaxPool(nn.Module): - def forward(self, x): - return [F.max_pool2d(x, 1, 2, 0)] - - -class LastLevelP6P7(nn.Module): - """ - This module is used in RetinaNet to generate extra layers, P6 and P7. - """ - def __init__(self, in_channels, out_channels): - super(LastLevelP6P7, self).__init__() - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - for module in [self.p6, self.p7]: - nn.init.kaiming_uniform_(module.weight, a=1) - nn.init.constant_(module.bias, 0) - self.use_P5 = in_channels == out_channels - - def forward(self, c5, p5): - x = p5 if self.use_P5 else c5 - p6 = self.p6(x) - p7 = self.p7(F.relu(p6)) - return [p6, p7] diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9af10d66.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9af10d66.js deleted file mode 100644 index 44ee9c0db832dec4ae65f6ce32776608b57a0aba..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-9af10d66.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as o,e as s,s as a}from"./index-1d65707a.js";class n extends o{constructor(e){super(),s(this,e,null,null,a,{})}}const c=n,p=["static"],d=t=>({type:{payload:"Any"},description:{payload:"stored state value"},example_data:""});export{c as Component,d as document,p as modes}; -//# sourceMappingURL=index-9af10d66.js.map diff --git a/spaces/Detomo/Object_detection/app.py b/spaces/Detomo/Object_detection/app.py deleted file mode 100644 index fee4535a32fa7b2db808da23cb03b9b15fc572a9..0000000000000000000000000000000000000000 --- a/spaces/Detomo/Object_detection/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import gradio as gr -import torch -from sahi.prediction import ObjectPrediction -from sahi.utils.cv import visualize_object_predictions, read_image -from ultralyticsplus import YOLO, render_result - - -def yolov8_inference( - image, - model_path, - image_size, - conf_threshold, - iou_threshold, -): - """ - YOLOv8 inference function - Args: - image: Input image - model_path: Path to the model - image_size: Image size - conf_threshold: Confidence threshold - iou_threshold: IOU threshold - Returns: - Rendered image - """ - model = YOLO(f'kadirnar/{model_path}-v8.0') - # set model parameters - 
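The two `model.overrides` thresholds set just below feed YOLO's non-maximum suppression: detections scoring under `conf` are discarded, and of overlapping boxes whose IoU exceeds `iou`, only the highest-scoring one survives. For reference, a minimal IoU computation for axis-aligned boxes (a generic sketch, not the ultralytics internals):

```python
# Intersection-over-union for (x1, y1, x2, y2) boxes, as consumed by the
# NMS thresholds configured below.
def iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

print(iou((0, 0, 2, 2), (1, 1, 3, 3)))  # 1/7 ~= 0.143
```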
model.overrides['conf'] = conf_threshold # NMS confidence threshold - model.overrides['iou'] = iou_threshold # NMS IoU threshold - model.overrides['agnostic_nms'] = False # NMS class-agnostic - model.overrides['max_det'] = 1000 # maximum number of detections per image - results = model.predict(image, imgsz=image_size) - render = render_result(model=model, image=image, result=results[0]) - return render - - -inputs = [ - gr.Image(type="filepath", label="Input Image"), - gr.Dropdown(["yolov8n", "yolov8m", "yolov8l", "yolov8x"], - default="yolov8m", label="Model"), - gr.Slider(minimum=320, maximum=1280, default=640, step=320, label="Image Size"), - gr.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"), - gr.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"), -] - -outputs = gr.Image(type="filepath", label="Output Image") -title = "State-of-the-Art YOLO Models for Object detection" - -examples = [['demo_01.jpg', 'yolov8n', 640, 0.25, 0.45], ['demo_02.jpg', 'yolov8l', 640, 0.25, 0.45], ['demo_03.jpg', 'yolov8x', 1280, 0.25, 0.45]] -demo_app = gr.Interface( - fn=yolov8_inference, - inputs=inputs, - outputs=outputs, - title=title, - examples=examples, - cache_examples=True, -) -demo_app.launch(debug=True) \ No newline at end of file diff --git a/spaces/Detomo/ai-comic-generation/src/lib/generateSeed.ts b/spaces/Detomo/ai-comic-generation/src/lib/generateSeed.ts deleted file mode 100644 index 563e25ec894ab5af54c5025a15a9b7a5918325de..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/lib/generateSeed.ts +++ /dev/null @@ -1,3 +0,0 @@ -export function generateSeed() { - return Math.floor(Math.random() * Math.pow(2, 31)); -} \ No newline at end of file diff --git a/spaces/DragGan/DragGan-Inversion/PTI/criteria/localitly_regulizer.py b/spaces/DragGan/DragGan-Inversion/PTI/criteria/localitly_regulizer.py deleted file mode 100644 index 09a5a40d44153bd0110d22b2d9a4d50970cf7515..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/criteria/localitly_regulizer.py +++ /dev/null @@ -1,65 +0,0 @@ -import torch -import numpy as np -from PTI.criteria import l2_loss -from PTI.configs import hyperparameters -from PTI.configs import global_config - - -class Space_Regulizer: - def __init__(self, original_G, lpips_net): - self.original_G = original_G - self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha - self.lpips_loss = lpips_net - - def get_morphed_w_code(self, new_w_code, fixed_w): - interpolation_direction = new_w_code - fixed_w - interpolation_direction_norm = torch.norm(interpolation_direction, p=2) - direction_to_move = hyperparameters.regulizer_alpha * \ - interpolation_direction / interpolation_direction_norm - result_w = fixed_w + direction_to_move - self.morphing_regulizer_alpha * fixed_w + \ - (1 - self.morphing_regulizer_alpha) * new_w_code - - return result_w - - def get_image_from_ws(self, w_codes, G): - return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes]) - - def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False): - loss = 0.0 - - z_samples = np.random.randn( - num_of_sampled_latents, self.original_G.z_dim) - w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None, - truncation_psi=0.5) - territory_indicator_ws = [self.get_morphed_w_code( - w_code.unsqueeze(0), w_batch) for w_code in w_samples] - - for w_code in territory_indicator_ws: - 
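`get_morphed_w_code` above normalizes the interpolation direction before stepping, so every sampled latent used by the regularizer sits at the same fixed distance from the anchor code. A numeric sketch, with `alpha` standing in for `hyperparameters.regulizer_alpha`:

```python
# The normalized W-space step from get_morphed_w_code, in isolation:
# move from the anchor toward a sampled code by a fixed distance alpha.
import torch

alpha = 0.1
fixed_w = torch.randn(1, 512)
new_w = torch.randn(1, 512)
direction = new_w - fixed_w
morphed = fixed_w + alpha * direction / direction.norm(p=2)
print((morphed - fixed_w).norm())  # ~alpha: all samples sit at the same radius
```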
new_img = new_G.synthesis( - w_code, noise_mode='none', force_fp32=True) - with torch.no_grad(): - old_img = self.original_G.synthesis( - w_code, noise_mode='none', force_fp32=True) - - if hyperparameters.regulizer_l2_lambda > 0: - l2_loss_val = l2_loss.l2_loss(old_img, new_img) - if use_wandb: - wandb.log({f'space_regulizer_l2_loss_val': l2_loss_val.detach().cpu()}, - step=global_config.training_step) - loss += l2_loss_val * hyperparameters.regulizer_l2_lambda - - if hyperparameters.regulizer_lpips_lambda > 0: - loss_lpips = self.lpips_loss(old_img, new_img) - loss_lpips = torch.mean(torch.squeeze(loss_lpips)) - if use_wandb: - wandb.log({f'space_regulizer_lpips_loss_val': loss_lpips.detach().cpu()}, - step=global_config.training_step) - loss += loss_lpips * hyperparameters.regulizer_lpips_lambda - - return loss / len(territory_indicator_ws) - - def space_regulizer_loss(self, new_G, w_batch, use_wandb): - ret_val = self.ball_holder_loss_lazy( - new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb) - return ret_val diff --git a/spaces/DragGan/DragGan/stylegan_human/training/networks_stylegan2.py b/spaces/DragGan/DragGan/stylegan_human/training/networks_stylegan2.py deleted file mode 100644 index 14c717927b2ad41681f5471511e428504808f4fe..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/training/networks_stylegan2.py +++ /dev/null @@ -1,824 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Network architectures from the paper -"Analyzing and Improving the Image Quality of StyleGAN". -Matches the original implementation of configs E-F by Karras et al. at -https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" - -import numpy as np -import torch -import torch.nn.functional as F -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_resample -from torch_utils.ops import upfirdn2d -from torch_utils.ops import bias_act -from torch_utils.ops import fma - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def normalize_2nd_moment(x, dim=1, eps=1e-8): - return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt() - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def modulated_conv2d( - x, # Input tensor of shape [batch_size, in_channels, in_height, in_width]. - weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. - styles, # Modulation coefficients of shape [batch_size, in_channels]. - noise = None, # Optional noise tensor to add to the output activations. - up = 1, # Integer upsampling factor. - down = 1, # Integer downsampling factor. - padding = 0, # Padding with respect to the upsampled image. - resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). - demodulate = True, # Apply weight demodulation? - flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d). 
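`normalize_2nd_moment` above is StyleGAN2's pixel norm, applied to `z` (and to label embeddings) before the mapping network: each vector is rescaled so the mean of its squared entries is 1. A quick check:

```python
# normalize_2nd_moment rescales each latent to unit second moment,
# regardless of its original scale.
import torch

def normalize_2nd_moment(x, dim=1, eps=1e-8):
    return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()

z = torch.randn(4, 512) * 3.7
print(normalize_2nd_moment(z).square().mean(dim=1))  # ~tensor([1., 1., 1., 1.])
```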
- fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation? -): - batch_size = x.shape[0] - out_channels, in_channels, kh, kw = weight.shape - misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk] - misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] - misc.assert_shape(styles, [batch_size, in_channels]) # [NI] - - # Pre-normalize inputs to avoid FP16 overflow. - if x.dtype == torch.float16 and demodulate: - weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk - styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I - - # Calculate per-sample weights and demodulation coefficients. - w = None - dcoefs = None - if demodulate or fused_modconv: - w = weight.unsqueeze(0) # [NOIkk] - w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] - if demodulate: - dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] - if demodulate and fused_modconv: - w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] - - # Execute by scaling the activations before and after the convolution. - if not fused_modconv: - x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) - x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight) - if demodulate and noise is not None: - x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)) - elif demodulate: - x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) - elif noise is not None: - x = x.add_(noise.to(x.dtype)) - return x - - # Execute as one fused op using grouped convolution. - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - batch_size = int(batch_size) - misc.assert_shape(x, [batch_size, in_channels, None, None]) - x = x.reshape(1, -1, *x.shape[2:]) - w = w.reshape(-1, in_channels, kh, kw) - x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight) - x = x.reshape(batch_size, -1, *x.shape[2:]) - if noise is not None: - x = x.add_(noise) - return x - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class FullyConnectedLayer(torch.nn.Module): - def __init__(self, - in_features, # Number of input features. - out_features, # Number of output features. - bias = True, # Apply additive bias before the activation function? - activation = 'linear', # Activation function: 'relu', 'lrelu', etc. - lr_multiplier = 1, # Learning rate multiplier. - bias_init = 0, # Initial value for the additive bias. 
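The heart of `modulated_conv2d` above is the modulate/demodulate pair: styles scale the kernel's input channels per sample, then each output channel is renormalized to unit L2 norm so activation magnitudes stay stable without explicit normalization layers. The arithmetic in isolation:

```python
# Style modulation followed by weight demodulation, as in modulated_conv2d.
import torch

batch, out_ch, in_ch, k = 2, 8, 4, 3
weight = torch.randn(out_ch, in_ch, k, k)
styles = torch.randn(batch, in_ch)

w = weight.unsqueeze(0) * styles.reshape(batch, 1, -1, 1, 1)  # [NOIkk]
dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()       # [NO]
w = w * dcoefs.reshape(batch, -1, 1, 1, 1)
print(w.square().sum(dim=[2, 3, 4]))  # ~1 for every (sample, out_channel)
```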
- ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.activation = activation - self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier) - self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None - self.weight_gain = lr_multiplier / np.sqrt(in_features) - self.bias_gain = lr_multiplier - - def forward(self, x): - w = self.weight.to(x.dtype) * self.weight_gain - b = self.bias - if b is not None: - b = b.to(x.dtype) - if self.bias_gain != 1: - b = b * self.bias_gain - - if self.activation == 'linear' and b is not None: - x = torch.addmm(b.unsqueeze(0), x, w.t()) - else: - x = x.matmul(w.t()) - x = bias_act.bias_act(x, b, act=self.activation) - return x - - def extra_repr(self): - return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Conv2dLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - kernel_size, # Width and height of the convolution kernel. - bias = True, # Apply additive bias before the activation function? - activation = 'linear', # Activation function: 'relu', 'lrelu', etc. - up = 1, # Integer upsampling factor. - down = 1, # Integer downsampling factor. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output to +-X, None = disable clamping. - channels_last = False, # Expect the input to have memory_format=channels_last? - trainable = True, # Update the weights of this layer during training? - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.activation = activation - self.up = up - self.down = down - self.conv_clamp = conv_clamp - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) - self.act_gain = bias_act.activation_funcs[activation].def_gain - - memory_format = torch.channels_last if channels_last else torch.contiguous_format - weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format) - bias = torch.zeros([out_channels]) if bias else None - if trainable: - self.weight = torch.nn.Parameter(weight) - self.bias = torch.nn.Parameter(bias) if bias is not None else None - else: - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', bias) - else: - self.bias = None - - def forward(self, x, gain=1): - w = self.weight * self.weight_gain - b = self.bias.to(x.dtype) if self.bias is not None else None - flip_weight = (self.up == 1) # slightly faster - x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight) - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, b, act=self.activation, gain=act_gain, clamp=act_clamp) - return x - - def extra_repr(self): - return ' '.join([ - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},', - f'up={self.up}, down={self.down}']) - -#---------------------------------------------------------------------------- 
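Both `FullyConnectedLayer` and `Conv2dLayer` above use equalized learning rate: weights are stored as unit-variance Gaussians and rescaled by `1/sqrt(fan_in)` at runtime rather than at initialization, so an optimizer like Adam sees identically distributed parameters in every layer. The effect on activation scale:

```python
# Equalized learning rate: runtime weight_gain keeps outputs at unit scale
# for any fan-in, with weights stored as plain unit-variance noise.
import numpy as np
import torch

in_features, out_features, lr_multiplier = 512, 512, 1.0
weight = torch.randn(out_features, in_features) / lr_multiplier  # stored
weight_gain = lr_multiplier / np.sqrt(in_features)               # applied in forward
x = torch.randn(16, in_features)
y = x @ (weight * weight_gain).t()
print(y.std())  # ~1 regardless of in_features
```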
- -@persistence.persistent_class -class MappingNetwork(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality, 0 = no latent. - c_dim, # Conditioning label (C) dimensionality, 0 = no label. - w_dim, # Intermediate latent (W) dimensionality. - num_ws, # Number of intermediate latents to output, None = do not broadcast. - num_layers = 8, # Number of mapping layers. - embed_features = None, # Label embedding dimensionality, None = same as w_dim. - layer_features = None, # Number of intermediate features in the mapping layers, None = same as w_dim. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers. - w_avg_beta = 0.998, # Decay for tracking the moving average of W during training, None = do not track. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.num_ws = num_ws - self.num_layers = num_layers - self.w_avg_beta = w_avg_beta - - if embed_features is None: - embed_features = w_dim - if c_dim == 0: - embed_features = 0 - if layer_features is None: - layer_features = w_dim - features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim] - - if c_dim > 0: - self.embed = FullyConnectedLayer(c_dim, embed_features) - for idx in range(num_layers): - in_features = features_list[idx] - out_features = features_list[idx + 1] - layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier) - setattr(self, f'fc{idx}', layer) - - if num_ws is not None and w_avg_beta is not None: - self.register_buffer('w_avg', torch.zeros([w_dim])) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): - # Embed, normalize, and concat inputs. - x = None - with torch.autograd.profiler.record_function('input'): - if self.z_dim > 0: - misc.assert_shape(z, [None, self.z_dim]) - x = normalize_2nd_moment(z.to(torch.float32)) - if self.c_dim > 0: - misc.assert_shape(c, [None, self.c_dim]) - y = normalize_2nd_moment(self.embed(c.to(torch.float32))) - x = torch.cat([x, y], dim=1) if x is not None else y - - # Main layers. - for idx in range(self.num_layers): - layer = getattr(self, f'fc{idx}') - x = layer(x) - - # Update moving average of W. - if update_emas and self.w_avg_beta is not None: - with torch.autograd.profiler.record_function('update_w_avg'): - self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) - - # Broadcast. - if self.num_ws is not None: - with torch.autograd.profiler.record_function('broadcast'): - x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) - - # Apply truncation. - if truncation_psi != 1: - with torch.autograd.profiler.record_function('truncate'): - assert self.w_avg_beta is not None - if self.num_ws is None or truncation_cutoff is None: - x = self.w_avg.lerp(x, truncation_psi) - else: - x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) - return x - - def extra_repr(self): - return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - w_dim, # Intermediate latent (W) dimensionality. - resolution, # Resolution of this layer. - kernel_size = 3, # Convolution kernel size. 
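The truncation trick at the end of `MappingNetwork.forward` above pulls generated codes toward the tracked average `w_avg`, trading diversity for fidelity; `truncation_cutoff` optionally restricts this to the earliest (coarsest) layers. The core operation is a single `lerp`:

```python
# Truncation trick: w_avg + psi * (w - w_avg). psi=1 leaves codes untouched,
# psi=0 collapses everything onto the running average.
import torch

w_avg = torch.zeros(512)           # tracked moving average of W
w = torch.randn(3, 512)
psi = 0.7
w_trunc = w_avg.lerp(w, psi)
print(torch.allclose(w_trunc, psi * w))  # True here, since w_avg is zero
```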
- up = 1, # Integer upsampling factor. - use_noise = True, # Enable noise input? - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - channels_last = False, # Use channels_last format for the weights? - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.w_dim = w_dim - self.resolution = resolution - self.up = up - self.use_noise = use_noise - self.activation = activation - self.conv_clamp = conv_clamp - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.act_gain = bias_act.activation_funcs[activation].def_gain - - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - if use_noise: - self.register_buffer('noise_const', torch.randn([resolution, resolution])) - self.noise_strength = torch.nn.Parameter(torch.zeros([])) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - - def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1): - assert noise_mode in ['random', 'const', 'none'] - in_resolution = self.resolution // self.up - misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution]) - styles = self.affine(w) - - noise = None - if self.use_noise and noise_mode == 'random': - noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength - if self.use_noise and noise_mode == 'const': - noise = self.noise_const * self.noise_strength - - flip_weight = (self.up == 1) # slightly faster - x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, - padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv) - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp) - return x - - def extra_repr(self): - return ' '.join([ - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},', - f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class ToRGBLayer(torch.nn.Module): - def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.w_dim = w_dim - self.conv_clamp = conv_clamp - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) - - def forward(self, x, w, fused_modconv=True): - styles = self.affine(w) * self.weight_gain - x = modulated_conv2d(x=x, 
weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv) - x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp) - return x - - def extra_repr(self): - return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisBlock(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels, 0 = first block. - out_channels, # Number of output channels. - w_dim, # Intermediate latent (W) dimensionality. - resolution, # Resolution of this block. - img_channels, # Number of output color channels. - is_last, # Is this the last block? - architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. - use_fp16 = False, # Use FP16 for this block? - fp16_channels_last = False, # Use channels-last memory format with FP16? - fused_modconv_default = True, # Default value of fused_modconv. 'inference_only' = True for inference, False for training. - **layer_kwargs, # Arguments for SynthesisLayer. - ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.w_dim = w_dim - self.resolution = resolution - self.img_channels = img_channels - self.is_last = is_last - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.fused_modconv_default = fused_modconv_default - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.num_conv = 0 - self.num_torgb = 0 - - if in_channels == 0: - self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution])) - - if in_channels != 0: - self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2, - resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) - self.num_conv += 1 - - self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, - conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) - self.num_conv += 1 - - if is_last or architecture == 'skip': - self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, - conv_clamp=conv_clamp, channels_last=self.channels_last) - self.num_torgb += 1 - - if in_channels != 0 and architecture == 'resnet': - self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, - resample_filter=resample_filter, channels_last=self.channels_last) - - def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): - _ = update_emas # unused - misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim]) - w_iter = iter(ws.unbind(dim=1)) - if ws.device.type != 'cuda': - force_fp32 = True - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - if fused_modconv is None: - fused_modconv = self.fused_modconv_default - if fused_modconv == 'inference_only': - fused_modconv = (not self.training) - - # Input. 
- if self.in_channels == 0: - x = self.const.to(dtype=dtype, memory_format=memory_format) - x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1]) - else: - misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # Main layers. - if self.in_channels == 0: - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - elif self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) - x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs) - x = y.add_(x) - else: - x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs) - - # ToRGB. - if img is not None: - misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2]) - img = upfirdn2d.upsample2d(img, self.resample_filter) - if self.is_last or self.architecture == 'skip': - y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) - y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format) - img = img.add_(y) if img is not None else y - - assert x.dtype == dtype - assert img is None or img.dtype == torch.float32 - return x, img - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisNetwork(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output image resolution. - img_channels, # Number of color channels. - channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - num_fp16_res = 4, # Use FP16 for the N highest resolutions. - **block_kwargs, # Arguments for SynthesisBlock. 
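Under the default 'skip' architecture, `SynthesisBlock.forward` above upsamples the running RGB image 2x each block and adds that block's `torgb` output, so the final image is a sum of contributions from every resolution. A shape-level sketch, with `F.interpolate` standing in for `upfirdn2d.upsample2d` and random tensors standing in for the per-block ToRGB outputs:

```python
# The 'skip' RGB accumulation pattern from SynthesisBlock.forward.
import torch
import torch.nn.functional as F

img = torch.zeros(1, 3, 4, 4)                                 # running RGB at 4x4
for res in [8, 16, 32]:
    img = F.interpolate(img, scale_factor=2, mode="nearest")  # stand-in upsample
    y = torch.randn(1, 3, res, res)                           # stand-in torgb(x, w)
    img = img + y
print(img.shape)                                              # torch.Size([1, 3, 32, 32])
```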
- ): - assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0 - super().__init__() - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_resolution_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.num_fp16_res = num_fp16_res - self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)] - channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions} - fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) - - self.num_ws = 0 - for res in self.block_resolutions: - in_channels = channels_dict[res // 2] if res > 4 else 0 - out_channels = channels_dict[res] - use_fp16 = (res >= fp16_resolution) - is_last = (res == self.img_resolution) - block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, - img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) - self.num_ws += block.num_conv - if is_last: - self.num_ws += block.num_torgb - setattr(self, f'b{res}', block) - - def forward(self, ws, return_feature=False, **block_kwargs): - block_ws = [] - features = [] - with torch.autograd.profiler.record_function('split_ws'): - misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) - ws = ws.to(torch.float32) - w_idx = 0 - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) - w_idx += block.num_conv - - x = img = None - for res, cur_ws in zip(self.block_resolutions, block_ws): - block = getattr(self, f'b{res}') - x, img = block(x, img, cur_ws, **block_kwargs) - features.append(x) - if return_feature: - return img, features - else: - return img - - def extra_repr(self): - return ' '.join([ - f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', - f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', - f'num_fp16_res={self.num_fp16_res:d}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Generator(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - c_dim, # Conditioning label (C) dimensionality. - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output resolution. - img_channels, # Number of output color channels. - mapping_kwargs = {}, # Arguments for MappingNetwork. - synthesis_kwargs = {}, # Arguments for SynthesisNetwork. - resize=None, - # **synthesis_kwargs, # Arguments for SynthesisNetwork. 
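`SynthesisNetwork.__init__` above derives per-resolution channel counts from `channel_base` and `channel_max`; evaluated at the defaults for a 1024x1024 generator, the schedule works out to:

```python
# The channel schedule computed in SynthesisNetwork.__init__, at defaults.
channel_base, channel_max = 32768, 512
block_resolutions = [2 ** i for i in range(2, 11)]  # 4 ... 1024
channels = {res: min(channel_base // res, channel_max) for res in block_resolutions}
print(channels)
# {4: 512, 8: 512, 16: 512, 32: 512, 64: 512,
#  128: 256, 256: 128, 512: 64, 1024: 32}
```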
- ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_channels = img_channels - self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) - self.num_ws = self.synthesis.num_ws - self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) - self.resize = resize - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs): - if input_is_w: - ws = z - if ws.dim() == 2: - ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1]) - else: - ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) - img = self.synthesis(ws, update_emas=update_emas, return_feature=return_feature, **synthesis_kwargs) - if self.resize is not None: - img = imresize(img, [self.resize, self.resize]) - return img - - -def imresize(image, size): - dim = image.dim() - if dim == 3: - image = image.unsqueeze(1) - b, _, h, w = image.shape - if size[0] > h: - image = F.interpolate(image, size, mode='bilinear') - elif size[0] < h: - image = F.interpolate(image, size, mode='area') - if dim == 3: - image = image.squeeze(1) - return image - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class DiscriminatorBlock(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels, 0 = first block. - tmp_channels, # Number of intermediate channels. - out_channels, # Number of output channels. - resolution, # Resolution of this block. - img_channels, # Number of input color channels. - first_layer_idx, # Index of the first layer. - architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - use_fp16 = False, # Use FP16 for this block? - fp16_channels_last = False, # Use channels-last memory format with FP16? - freeze_layers = 0, # Freeze-D: Number of layers to freeze. 
- ): - assert in_channels in [0, tmp_channels] - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.resolution = resolution - self.img_channels = img_channels - self.first_layer_idx = first_layer_idx - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter)) - - self.num_layers = 0 - def trainable_gen(): - while True: - layer_idx = self.first_layer_idx + self.num_layers - trainable = (layer_idx >= freeze_layers) - self.num_layers += 1 - yield trainable - trainable_iter = trainable_gen() - - if in_channels == 0 or architecture == 'skip': - self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation, - trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last) - - self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation, - trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last) - - self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2, - trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last) - - if architecture == 'resnet': - self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2, - trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last) - - def forward(self, x, img, force_fp32=False): - if (x if x is not None else img).device.type != 'cuda': - force_fp32 = True - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - - # Input. - if x is not None: - misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # FromRGB. - if self.in_channels == 0 or self.architecture == 'skip': - misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) - img = img.to(dtype=dtype, memory_format=memory_format) - y = self.fromrgb(img) - x = x + y if x is not None else y - img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None - - # Main layers. - if self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) - x = self.conv0(x) - x = self.conv1(x, gain=np.sqrt(0.5)) - x = y.add_(x) - else: - x = self.conv0(x) - x = self.conv1(x) - - assert x.dtype == dtype - return x, img - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class MinibatchStdLayer(torch.nn.Module): - def __init__(self, group_size, num_channels=1): - super().__init__() - self.group_size = group_size - self.num_channels = num_channels - - def forward(self, x): - N, C, H, W = x.shape - with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants - G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N)) if self.group_size is not None else N - F = self.num_channels - c = C // F - - y = x.reshape(G, -1, F, c, H, W) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c. 
- y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group. - y = y.square().mean(dim=0) # [nFcHW] Calc variance over group. - y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group. - y = y.mean(dim=[2,3,4]) # [nF] Take average over channels and pixels. - y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions. - y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels. - x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels. - return x - - def extra_repr(self): - return f'group_size={self.group_size}, num_channels={self.num_channels:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class DiscriminatorEpilogue(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label. - resolution, # Resolution of this block. - img_channels, # Number of input color channels. - architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. - mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, None = entire minibatch. - mbstd_num_channels = 1, # Number of features for the minibatch standard deviation layer, 0 = disable. - activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc. - conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping. - ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.cmap_dim = cmap_dim - self.resolution = resolution - self.img_channels = img_channels - self.architecture = architecture - - if architecture == 'skip': - self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation) - self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None - self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp) - self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation) - self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim) - - def forward(self, x, img, cmap, force_fp32=False): - misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW] - _ = force_fp32 # unused - dtype = torch.float32 - memory_format = torch.contiguous_format - - # FromRGB. - x = x.to(dtype=dtype, memory_format=memory_format) - if self.architecture == 'skip': - misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution]) - img = img.to(dtype=dtype, memory_format=memory_format) - x = x + self.fromrgb(img) - - # Main layers. - if self.mbstd is not None: - x = self.mbstd(x) - x = self.conv(x) - x = self.fc(x.flatten(1)) - x = self.out(x) - - # Conditioning. - if self.cmap_dim > 0: - misc.assert_shape(cmap, [None, self.cmap_dim]) - x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim)) - - assert x.dtype == dtype - return x - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Discriminator(torch.nn.Module): - def __init__(self, - c_dim, # Conditioning label (C) dimensionality. - img_resolution, # Input resolution. - img_channels, # Number of input color channels. - architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'. 
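`MinibatchStdLayer` above gives the discriminator a direct view of batch diversity: it computes per-group standard deviations across the minibatch, averages them into `num_channels` scalars per group, and tiles the result as extra feature maps. The same computation on concrete shapes:

```python
# MinibatchStdLayer.forward on concrete shapes: one extra channel is
# appended that encodes the per-group stddev across the batch.
import torch

N, C, H, W = 8, 16, 4, 4
x = torch.randn(N, C, H, W)
G, F_ = 4, 1                                      # group size, stddev channels
y = x.reshape(G, -1, F_, C // F_, H, W)
y = ((y - y.mean(dim=0)).square().mean(dim=0) + 1e-8).sqrt()  # stddev over group
y = y.mean(dim=[2, 3, 4]).reshape(-1, F_, 1, 1).repeat(G, 1, H, W)
out = torch.cat([x, y], dim=1)
print(out.shape)                                  # torch.Size([8, 17, 4, 4])
```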
- channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - num_fp16_res = 4, # Use FP16 for the N highest resolutions. - conv_clamp = 256, # Clamp the output of convolution layers to +-X, None = disable clamping. - cmap_dim = None, # Dimensionality of mapped conditioning label, None = default. - block_kwargs = {}, # Arguments for DiscriminatorBlock. - mapping_kwargs = {}, # Arguments for MappingNetwork. - epilogue_kwargs = {}, # Arguments for DiscriminatorEpilogue. - ): - super().__init__() - self.c_dim = c_dim - self.img_resolution = img_resolution - self.img_resolution_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)] - channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]} - fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) - - if cmap_dim is None: - cmap_dim = channels_dict[4] - if c_dim == 0: - cmap_dim = 0 - - common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp) - cur_layer_idx = 0 - for res in self.block_resolutions: - in_channels = channels_dict[res] if res < img_resolution else 0 - tmp_channels = channels_dict[res] - out_channels = channels_dict[res // 2] - use_fp16 = (res >= fp16_resolution) - block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, - first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs) - setattr(self, f'b{res}', block) - cur_layer_idx += block.num_layers - if c_dim > 0: - self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs) - self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs) - - def forward(self, img, c, update_emas=False, **block_kwargs): - _ = update_emas # unused - x = None - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - x, img = block(x, img, **block_kwargs) - - cmap = None - if self.c_dim > 0: - cmap = self.mapping(None, c) - x = self.b4(x, img, cmap) - return x - - def extra_repr(self): - return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' - -#---------------------------------------------------------------------------- diff --git a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_full.py b/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_full.py deleted file mode 100644 index 7121a22227583b29a6e167b560703e33371f1081..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/mask2former/data/datasets/register_ade20k_full.py +++ /dev/null @@ -1,964 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets import load_sem_seg - -ADE20K_SEM_SEG_FULL_CATEGORIES = [ - {"name": "wall", "id": 2978, "trainId": 0}, - {"name": "building, edifice", "id": 312, "trainId": 1}, - {"name": "sky", "id": 2420, "trainId": 2}, - {"name": "tree", "id": 2855, "trainId": 3}, - {"name": "road, route", "id": 2131, "trainId": 4}, - {"name": "floor, flooring", "id": 976, "trainId": 5}, - {"name": "ceiling", "id": 447, "trainId": 6}, - {"name": "bed", "id": 165, "trainId": 7}, - {"name": "sidewalk, pavement", "id": 2377, "trainId": 8}, - {"name": "earth, ground", "id": 838, "trainId": 9}, - {"name": "cabinet", "id": 350, "trainId": 10}, - {"name": "person, individual, someone, somebody, mortal, soul", "id": 1831, "trainId": 11}, - {"name": "grass", "id": 1125, "trainId": 12}, - {"name": "windowpane, window", "id": 3055, "trainId": 13}, - {"name": "car, auto, automobile, machine, motorcar", "id": 401, "trainId": 14}, - {"name": "mountain, mount", "id": 1610, "trainId": 15}, - {"name": "plant, flora, plant life", "id": 1910, "trainId": 16}, - {"name": "table", "id": 2684, "trainId": 17}, - {"name": "chair", "id": 471, "trainId": 18}, - {"name": "curtain, drape, drapery, mantle, pall", "id": 687, "trainId": 19}, - {"name": "door", "id": 774, "trainId": 20}, - {"name": "sofa, couch, lounge", "id": 2473, "trainId": 21}, - {"name": "sea", "id": 2264, "trainId": 22}, - {"name": "painting, picture", "id": 1735, "trainId": 23}, - {"name": "water", "id": 2994, "trainId": 24}, - {"name": "mirror", "id": 1564, "trainId": 25}, - {"name": "house", "id": 1276, "trainId": 26}, - {"name": "rug, carpet, carpeting", "id": 2178, "trainId": 27}, - {"name": "shelf", "id": 2329, "trainId": 28}, - {"name": "armchair", "id": 57, "trainId": 29}, - {"name": "fence, fencing", "id": 907, "trainId": 30}, - {"name": "field", "id": 913, "trainId": 31}, - {"name": "lamp", "id": 1395, "trainId": 32}, - {"name": "rock, stone", "id": 2138, "trainId": 33}, - {"name": "seat", "id": 2272, "trainId": 34}, - {"name": "river", "id": 2128, "trainId": 35}, - {"name": "desk", "id": 724, "trainId": 36}, - {"name": "bathtub, bathing tub, bath, tub", "id": 155, "trainId": 37}, - {"name": "railing, rail", "id": 2053, "trainId": 38}, - {"name": "signboard, sign", "id": 2380, "trainId": 39}, - {"name": "cushion", "id": 689, "trainId": 40}, - {"name": "path", "id": 1788, "trainId": 41}, - {"name": "work surface", "id": 3087, "trainId": 42}, - {"name": "stairs, steps", "id": 2530, "trainId": 43}, - {"name": "column, pillar", "id": 581, "trainId": 44}, - {"name": "sink", "id": 2388, "trainId": 45}, - {"name": "wardrobe, closet, press", "id": 2985, "trainId": 46}, - {"name": "snow", "id": 2454, "trainId": 47}, - {"name": "refrigerator, icebox", "id": 2096, "trainId": 48}, - {"name": "base, pedestal, stand", "id": 137, "trainId": 49}, - {"name": "bridge, span", "id": 294, "trainId": 50}, - {"name": "blind, screen", "id": 212, "trainId": 51}, - {"name": "runway", "id": 2185, "trainId": 52}, - {"name": "cliff, drop, drop-off", "id": 524, "trainId": 53}, - {"name": "sand", "id": 2212, "trainId": 54}, - {"name": "fireplace, hearth, open fireplace", "id": 943, "trainId": 55}, - {"name": "pillow", "id": 1869, "trainId": 56}, - {"name": "screen door, screen", "id": 2251, "trainId": 57}, - {"name": "toilet, can, commode, crapper, pot, potty, stool, throne", "id": 2793, "trainId": 58}, - {"name": "skyscraper", "id": 2423, "trainId": 59}, - {"name": "grandstand, covered stand", 
"id": 1121, "trainId": 60}, - {"name": "box", "id": 266, "trainId": 61}, - {"name": "pool table, billiard table, snooker table", "id": 1948, "trainId": 62}, - {"name": "palm, palm tree", "id": 1744, "trainId": 63}, - {"name": "double door", "id": 783, "trainId": 64}, - {"name": "coffee table, cocktail table", "id": 571, "trainId": 65}, - {"name": "counter", "id": 627, "trainId": 66}, - {"name": "countertop", "id": 629, "trainId": 67}, - {"name": "chest of drawers, chest, bureau, dresser", "id": 491, "trainId": 68}, - {"name": "kitchen island", "id": 1374, "trainId": 69}, - {"name": "boat", "id": 223, "trainId": 70}, - {"name": "waterfall, falls", "id": 3016, "trainId": 71}, - { - "name": "stove, kitchen stove, range, kitchen range, cooking stove", - "id": 2598, - "trainId": 72, - }, - {"name": "flower", "id": 978, "trainId": 73}, - {"name": "bookcase", "id": 239, "trainId": 74}, - {"name": "controls", "id": 608, "trainId": 75}, - {"name": "book", "id": 236, "trainId": 76}, - {"name": "stairway, staircase", "id": 2531, "trainId": 77}, - {"name": "streetlight, street lamp", "id": 2616, "trainId": 78}, - { - "name": "computer, computing machine, computing device, data processor, electronic computer, information processing system", - "id": 591, - "trainId": 79, - }, - { - "name": "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, motorcoach, omnibus, passenger vehicle", - "id": 327, - "trainId": 80, - }, - {"name": "swivel chair", "id": 2679, "trainId": 81}, - {"name": "light, light source", "id": 1451, "trainId": 82}, - {"name": "bench", "id": 181, "trainId": 83}, - {"name": "case, display case, showcase, vitrine", "id": 420, "trainId": 84}, - {"name": "towel", "id": 2821, "trainId": 85}, - {"name": "fountain", "id": 1023, "trainId": 86}, - {"name": "embankment", "id": 855, "trainId": 87}, - { - "name": "television receiver, television, television set, tv, tv set, idiot box, boob tube, telly, goggle box", - "id": 2733, - "trainId": 88, - }, - {"name": "van", "id": 2928, "trainId": 89}, - {"name": "hill", "id": 1240, "trainId": 90}, - {"name": "awning, sunshade, sunblind", "id": 77, "trainId": 91}, - {"name": "poster, posting, placard, notice, bill, card", "id": 1969, "trainId": 92}, - {"name": "truck, motortruck", "id": 2880, "trainId": 93}, - {"name": "airplane, aeroplane, plane", "id": 14, "trainId": 94}, - {"name": "pole", "id": 1936, "trainId": 95}, - {"name": "tower", "id": 2828, "trainId": 96}, - {"name": "court", "id": 631, "trainId": 97}, - {"name": "ball", "id": 103, "trainId": 98}, - { - "name": "aircraft carrier, carrier, flattop, attack aircraft carrier", - "id": 3144, - "trainId": 99, - }, - {"name": "buffet, counter, sideboard", "id": 308, "trainId": 100}, - {"name": "hovel, hut, hutch, shack, shanty", "id": 1282, "trainId": 101}, - {"name": "apparel, wearing apparel, dress, clothes", "id": 38, "trainId": 102}, - {"name": "minibike, motorbike", "id": 1563, "trainId": 103}, - {"name": "animal, animate being, beast, brute, creature, fauna", "id": 29, "trainId": 104}, - {"name": "chandelier, pendant, pendent", "id": 480, "trainId": 105}, - {"name": "step, stair", "id": 2569, "trainId": 106}, - {"name": "booth, cubicle, stall, kiosk", "id": 247, "trainId": 107}, - {"name": "bicycle, bike, wheel, cycle", "id": 187, "trainId": 108}, - {"name": "doorframe, doorcase", "id": 778, "trainId": 109}, - {"name": "sconce", "id": 2243, "trainId": 110}, - {"name": "pond", "id": 1941, "trainId": 111}, - {"name": "trade name, brand name, brand, marque", "id": 2833, "trainId": 112}, 
- {"name": "bannister, banister, balustrade, balusters, handrail", "id": 120, "trainId": 113}, - {"name": "bag", "id": 95, "trainId": 114}, - {"name": "traffic light, traffic signal, stoplight", "id": 2836, "trainId": 115}, - {"name": "gazebo", "id": 1087, "trainId": 116}, - {"name": "escalator, moving staircase, moving stairway", "id": 868, "trainId": 117}, - {"name": "land, ground, soil", "id": 1401, "trainId": 118}, - {"name": "board, plank", "id": 220, "trainId": 119}, - {"name": "arcade machine", "id": 47, "trainId": 120}, - {"name": "eiderdown, duvet, continental quilt", "id": 843, "trainId": 121}, - {"name": "bar", "id": 123, "trainId": 122}, - {"name": "stall, stand, sales booth", "id": 2537, "trainId": 123}, - {"name": "playground", "id": 1927, "trainId": 124}, - {"name": "ship", "id": 2337, "trainId": 125}, - {"name": "ottoman, pouf, pouffe, puff, hassock", "id": 1702, "trainId": 126}, - { - "name": "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", - "id": 64, - "trainId": 127, - }, - {"name": "bottle", "id": 249, "trainId": 128}, - {"name": "cradle", "id": 642, "trainId": 129}, - {"name": "pot, flowerpot", "id": 1981, "trainId": 130}, - { - "name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter", - "id": 609, - "trainId": 131, - }, - {"name": "train, railroad train", "id": 2840, "trainId": 132}, - {"name": "stool", "id": 2586, "trainId": 133}, - {"name": "lake", "id": 1393, "trainId": 134}, - {"name": "tank, storage tank", "id": 2704, "trainId": 135}, - {"name": "ice, water ice", "id": 1304, "trainId": 136}, - {"name": "basket, handbasket", "id": 146, "trainId": 137}, - {"name": "manhole", "id": 1494, "trainId": 138}, - {"name": "tent, collapsible shelter", "id": 2739, "trainId": 139}, - {"name": "canopy", "id": 389, "trainId": 140}, - {"name": "microwave, microwave oven", "id": 1551, "trainId": 141}, - {"name": "barrel, cask", "id": 131, "trainId": 142}, - {"name": "dirt track", "id": 738, "trainId": 143}, - {"name": "beam", "id": 161, "trainId": 144}, - {"name": "dishwasher, dish washer, dishwashing machine", "id": 747, "trainId": 145}, - {"name": "plate", "id": 1919, "trainId": 146}, - {"name": "screen, crt screen", "id": 3109, "trainId": 147}, - {"name": "ruins", "id": 2179, "trainId": 148}, - {"name": "washer, automatic washer, washing machine", "id": 2989, "trainId": 149}, - {"name": "blanket, cover", "id": 206, "trainId": 150}, - {"name": "plaything, toy", "id": 1930, "trainId": 151}, - {"name": "food, solid food", "id": 1002, "trainId": 152}, - {"name": "screen, silver screen, projection screen", "id": 2254, "trainId": 153}, - {"name": "oven", "id": 1708, "trainId": 154}, - {"name": "stage", "id": 2526, "trainId": 155}, - {"name": "beacon, lighthouse, beacon light, pharos", "id": 160, "trainId": 156}, - {"name": "umbrella", "id": 2901, "trainId": 157}, - {"name": "sculpture", "id": 2262, "trainId": 158}, - {"name": "aqueduct", "id": 44, "trainId": 159}, - {"name": "container", "id": 597, "trainId": 160}, - {"name": "scaffolding, staging", "id": 2235, "trainId": 161}, - {"name": "hood, exhaust hood", "id": 1260, "trainId": 162}, - {"name": "curb, curbing, kerb", "id": 682, "trainId": 163}, - {"name": "roller coaster", "id": 2151, "trainId": 164}, - {"name": "horse, equus caballus", "id": 3107, "trainId": 165}, - {"name": "catwalk", "id": 432, "trainId": 166}, - {"name": "glass, drinking glass", "id": 1098, "trainId": 167}, - {"name": "vase", "id": 2932, "trainId": 168}, - {"name": "central reservation", 
"id": 461, "trainId": 169}, - {"name": "carousel", "id": 410, "trainId": 170}, - {"name": "radiator", "id": 2046, "trainId": 171}, - {"name": "closet", "id": 533, "trainId": 172}, - {"name": "machine", "id": 1481, "trainId": 173}, - {"name": "pier, wharf, wharfage, dock", "id": 1858, "trainId": 174}, - {"name": "fan", "id": 894, "trainId": 175}, - {"name": "inflatable bounce game", "id": 1322, "trainId": 176}, - {"name": "pitch", "id": 1891, "trainId": 177}, - {"name": "paper", "id": 1756, "trainId": 178}, - {"name": "arcade, colonnade", "id": 49, "trainId": 179}, - {"name": "hot tub", "id": 1272, "trainId": 180}, - {"name": "helicopter", "id": 1229, "trainId": 181}, - {"name": "tray", "id": 2850, "trainId": 182}, - {"name": "partition, divider", "id": 1784, "trainId": 183}, - {"name": "vineyard", "id": 2962, "trainId": 184}, - {"name": "bowl", "id": 259, "trainId": 185}, - {"name": "bullring", "id": 319, "trainId": 186}, - {"name": "flag", "id": 954, "trainId": 187}, - {"name": "pot", "id": 1974, "trainId": 188}, - {"name": "footbridge, overcrossing, pedestrian bridge", "id": 1013, "trainId": 189}, - {"name": "shower", "id": 2356, "trainId": 190}, - {"name": "bag, traveling bag, travelling bag, grip, suitcase", "id": 97, "trainId": 191}, - {"name": "bulletin board, notice board", "id": 318, "trainId": 192}, - {"name": "confessional booth", "id": 592, "trainId": 193}, - {"name": "trunk, tree trunk, bole", "id": 2885, "trainId": 194}, - {"name": "forest", "id": 1017, "trainId": 195}, - {"name": "elevator door", "id": 851, "trainId": 196}, - {"name": "laptop, laptop computer", "id": 1407, "trainId": 197}, - {"name": "instrument panel", "id": 1332, "trainId": 198}, - {"name": "bucket, pail", "id": 303, "trainId": 199}, - {"name": "tapestry, tapis", "id": 2714, "trainId": 200}, - {"name": "platform", "id": 1924, "trainId": 201}, - {"name": "jacket", "id": 1346, "trainId": 202}, - {"name": "gate", "id": 1081, "trainId": 203}, - {"name": "monitor, monitoring device", "id": 1583, "trainId": 204}, - { - "name": "telephone booth, phone booth, call box, telephone box, telephone kiosk", - "id": 2727, - "trainId": 205, - }, - {"name": "spotlight, spot", "id": 2509, "trainId": 206}, - {"name": "ring", "id": 2123, "trainId": 207}, - {"name": "control panel", "id": 602, "trainId": 208}, - {"name": "blackboard, chalkboard", "id": 202, "trainId": 209}, - {"name": "air conditioner, air conditioning", "id": 10, "trainId": 210}, - {"name": "chest", "id": 490, "trainId": 211}, - {"name": "clock", "id": 530, "trainId": 212}, - {"name": "sand dune", "id": 2213, "trainId": 213}, - {"name": "pipe, pipage, piping", "id": 1884, "trainId": 214}, - {"name": "vault", "id": 2934, "trainId": 215}, - {"name": "table football", "id": 2687, "trainId": 216}, - {"name": "cannon", "id": 387, "trainId": 217}, - {"name": "swimming pool, swimming bath, natatorium", "id": 2668, "trainId": 218}, - {"name": "fluorescent, fluorescent fixture", "id": 982, "trainId": 219}, - {"name": "statue", "id": 2547, "trainId": 220}, - { - "name": "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", - "id": 1474, - "trainId": 221, - }, - {"name": "exhibitor", "id": 877, "trainId": 222}, - {"name": "ladder", "id": 1391, "trainId": 223}, - {"name": "carport", "id": 414, "trainId": 224}, - {"name": "dam", "id": 698, "trainId": 225}, - {"name": "pulpit", "id": 2019, "trainId": 226}, - {"name": "skylight, fanlight", "id": 2422, "trainId": 227}, - {"name": "water tower", "id": 3010, "trainId": 228}, - {"name": "grill, grille, 
grillwork", "id": 1139, "trainId": 229}, - {"name": "display board", "id": 753, "trainId": 230}, - {"name": "pane, pane of glass, window glass", "id": 1747, "trainId": 231}, - {"name": "rubbish, trash, scrap", "id": 2175, "trainId": 232}, - {"name": "ice rink", "id": 1301, "trainId": 233}, - {"name": "fruit", "id": 1033, "trainId": 234}, - {"name": "patio", "id": 1789, "trainId": 235}, - {"name": "vending machine", "id": 2939, "trainId": 236}, - {"name": "telephone, phone, telephone set", "id": 2730, "trainId": 237}, - {"name": "net", "id": 1652, "trainId": 238}, - { - "name": "backpack, back pack, knapsack, packsack, rucksack, haversack", - "id": 90, - "trainId": 239, - }, - {"name": "jar", "id": 1349, "trainId": 240}, - {"name": "track", "id": 2830, "trainId": 241}, - {"name": "magazine", "id": 1485, "trainId": 242}, - {"name": "shutter", "id": 2370, "trainId": 243}, - {"name": "roof", "id": 2155, "trainId": 244}, - {"name": "banner, streamer", "id": 118, "trainId": 245}, - {"name": "landfill", "id": 1402, "trainId": 246}, - {"name": "post", "id": 1957, "trainId": 247}, - {"name": "altarpiece, reredos", "id": 3130, "trainId": 248}, - {"name": "hat, chapeau, lid", "id": 1197, "trainId": 249}, - {"name": "arch, archway", "id": 52, "trainId": 250}, - {"name": "table game", "id": 2688, "trainId": 251}, - {"name": "bag, handbag, pocketbook, purse", "id": 96, "trainId": 252}, - {"name": "document, written document, papers", "id": 762, "trainId": 253}, - {"name": "dome", "id": 772, "trainId": 254}, - {"name": "pier", "id": 1857, "trainId": 255}, - {"name": "shanties", "id": 2315, "trainId": 256}, - {"name": "forecourt", "id": 1016, "trainId": 257}, - {"name": "crane", "id": 643, "trainId": 258}, - {"name": "dog, domestic dog, canis familiaris", "id": 3105, "trainId": 259}, - {"name": "piano, pianoforte, forte-piano", "id": 1849, "trainId": 260}, - {"name": "drawing", "id": 791, "trainId": 261}, - {"name": "cabin", "id": 349, "trainId": 262}, - { - "name": "ad, advertisement, advertizement, advertising, advertizing, advert", - "id": 6, - "trainId": 263, - }, - {"name": "amphitheater, amphitheatre, coliseum", "id": 3114, "trainId": 264}, - {"name": "monument", "id": 1587, "trainId": 265}, - {"name": "henhouse", "id": 1233, "trainId": 266}, - {"name": "cockpit", "id": 559, "trainId": 267}, - {"name": "heater, warmer", "id": 1223, "trainId": 268}, - {"name": "windmill, aerogenerator, wind generator", "id": 3049, "trainId": 269}, - {"name": "pool", "id": 1943, "trainId": 270}, - {"name": "elevator, lift", "id": 853, "trainId": 271}, - {"name": "decoration, ornament, ornamentation", "id": 709, "trainId": 272}, - {"name": "labyrinth", "id": 1390, "trainId": 273}, - {"name": "text, textual matter", "id": 2748, "trainId": 274}, - {"name": "printer", "id": 2007, "trainId": 275}, - {"name": "mezzanine, first balcony", "id": 1546, "trainId": 276}, - {"name": "mattress", "id": 1513, "trainId": 277}, - {"name": "straw", "id": 2600, "trainId": 278}, - {"name": "stalls", "id": 2538, "trainId": 279}, - {"name": "patio, terrace", "id": 1790, "trainId": 280}, - {"name": "billboard, hoarding", "id": 194, "trainId": 281}, - {"name": "bus stop", "id": 326, "trainId": 282}, - {"name": "trouser, pant", "id": 2877, "trainId": 283}, - {"name": "console table, console", "id": 594, "trainId": 284}, - {"name": "rack", "id": 2036, "trainId": 285}, - {"name": "notebook", "id": 1662, "trainId": 286}, - {"name": "shrine", "id": 2366, "trainId": 287}, - {"name": "pantry", "id": 1754, "trainId": 288}, - {"name": "cart", "id": 
418, "trainId": 289}, - {"name": "steam shovel", "id": 2553, "trainId": 290}, - {"name": "porch", "id": 1951, "trainId": 291}, - {"name": "postbox, mailbox, letter box", "id": 1963, "trainId": 292}, - {"name": "figurine, statuette", "id": 918, "trainId": 293}, - {"name": "recycling bin", "id": 2086, "trainId": 294}, - {"name": "folding screen", "id": 997, "trainId": 295}, - {"name": "telescope", "id": 2731, "trainId": 296}, - {"name": "deck chair, beach chair", "id": 704, "trainId": 297}, - {"name": "kennel", "id": 1365, "trainId": 298}, - {"name": "coffee maker", "id": 569, "trainId": 299}, - {"name": "altar, communion table, lord's table", "id": 3108, "trainId": 300}, - {"name": "fish", "id": 948, "trainId": 301}, - {"name": "easel", "id": 839, "trainId": 302}, - {"name": "artificial golf green", "id": 63, "trainId": 303}, - {"name": "iceberg", "id": 1305, "trainId": 304}, - {"name": "candlestick, candle holder", "id": 378, "trainId": 305}, - {"name": "shower stall, shower bath", "id": 2362, "trainId": 306}, - {"name": "television stand", "id": 2734, "trainId": 307}, - { - "name": "wall socket, wall plug, electric outlet, electrical outlet, outlet, electric receptacle", - "id": 2982, - "trainId": 308, - }, - {"name": "skeleton", "id": 2398, "trainId": 309}, - {"name": "grand piano, grand", "id": 1119, "trainId": 310}, - {"name": "candy, confect", "id": 382, "trainId": 311}, - {"name": "grille door", "id": 1141, "trainId": 312}, - {"name": "pedestal, plinth, footstall", "id": 1805, "trainId": 313}, - {"name": "jersey, t-shirt, tee shirt", "id": 3102, "trainId": 314}, - {"name": "shoe", "id": 2341, "trainId": 315}, - {"name": "gravestone, headstone, tombstone", "id": 1131, "trainId": 316}, - {"name": "shanty", "id": 2316, "trainId": 317}, - {"name": "structure", "id": 2626, "trainId": 318}, - {"name": "rocking chair, rocker", "id": 3104, "trainId": 319}, - {"name": "bird", "id": 198, "trainId": 320}, - {"name": "place mat", "id": 1896, "trainId": 321}, - {"name": "tomb", "id": 2800, "trainId": 322}, - {"name": "big top", "id": 190, "trainId": 323}, - {"name": "gas pump, gasoline pump, petrol pump, island dispenser", "id": 3131, "trainId": 324}, - {"name": "lockers", "id": 1463, "trainId": 325}, - {"name": "cage", "id": 357, "trainId": 326}, - {"name": "finger", "id": 929, "trainId": 327}, - {"name": "bleachers", "id": 209, "trainId": 328}, - {"name": "ferris wheel", "id": 912, "trainId": 329}, - {"name": "hairdresser chair", "id": 1164, "trainId": 330}, - {"name": "mat", "id": 1509, "trainId": 331}, - {"name": "stands", "id": 2539, "trainId": 332}, - {"name": "aquarium, fish tank, marine museum", "id": 3116, "trainId": 333}, - {"name": "streetcar, tram, tramcar, trolley, trolley car", "id": 2615, "trainId": 334}, - {"name": "napkin, table napkin, serviette", "id": 1644, "trainId": 335}, - {"name": "dummy", "id": 818, "trainId": 336}, - {"name": "booklet, brochure, folder, leaflet, pamphlet", "id": 242, "trainId": 337}, - {"name": "sand trap", "id": 2217, "trainId": 338}, - {"name": "shop, store", "id": 2347, "trainId": 339}, - {"name": "table cloth", "id": 2686, "trainId": 340}, - {"name": "service station", "id": 2300, "trainId": 341}, - {"name": "coffin", "id": 572, "trainId": 342}, - {"name": "drawer", "id": 789, "trainId": 343}, - {"name": "cages", "id": 358, "trainId": 344}, - {"name": "slot machine, coin machine", "id": 2443, "trainId": 345}, - {"name": "balcony", "id": 101, "trainId": 346}, - {"name": "volleyball court", "id": 2969, "trainId": 347}, - {"name": "table tennis", "id": 
2692, "trainId": 348}, - {"name": "control table", "id": 606, "trainId": 349}, - {"name": "shirt", "id": 2339, "trainId": 350}, - {"name": "merchandise, ware, product", "id": 1533, "trainId": 351}, - {"name": "railway", "id": 2060, "trainId": 352}, - {"name": "parterre", "id": 1782, "trainId": 353}, - {"name": "chimney", "id": 495, "trainId": 354}, - {"name": "can, tin, tin can", "id": 371, "trainId": 355}, - {"name": "tanks", "id": 2707, "trainId": 356}, - {"name": "fabric, cloth, material, textile", "id": 889, "trainId": 357}, - {"name": "alga, algae", "id": 3156, "trainId": 358}, - {"name": "system", "id": 2683, "trainId": 359}, - {"name": "map", "id": 1499, "trainId": 360}, - {"name": "greenhouse", "id": 1135, "trainId": 361}, - {"name": "mug", "id": 1619, "trainId": 362}, - {"name": "barbecue", "id": 125, "trainId": 363}, - {"name": "trailer", "id": 2838, "trainId": 364}, - {"name": "toilet tissue, toilet paper, bathroom tissue", "id": 2792, "trainId": 365}, - {"name": "organ", "id": 1695, "trainId": 366}, - {"name": "dishrag, dishcloth", "id": 746, "trainId": 367}, - {"name": "island", "id": 1343, "trainId": 368}, - {"name": "keyboard", "id": 1370, "trainId": 369}, - {"name": "trench", "id": 2858, "trainId": 370}, - {"name": "basket, basketball hoop, hoop", "id": 145, "trainId": 371}, - {"name": "steering wheel, wheel", "id": 2565, "trainId": 372}, - {"name": "pitcher, ewer", "id": 1892, "trainId": 373}, - {"name": "goal", "id": 1103, "trainId": 374}, - {"name": "bread, breadstuff, staff of life", "id": 286, "trainId": 375}, - {"name": "beds", "id": 170, "trainId": 376}, - {"name": "wood", "id": 3073, "trainId": 377}, - {"name": "file cabinet", "id": 922, "trainId": 378}, - {"name": "newspaper, paper", "id": 1655, "trainId": 379}, - {"name": "motorboat", "id": 1602, "trainId": 380}, - {"name": "rope", "id": 2160, "trainId": 381}, - {"name": "guitar", "id": 1151, "trainId": 382}, - {"name": "rubble", "id": 2176, "trainId": 383}, - {"name": "scarf", "id": 2239, "trainId": 384}, - {"name": "barrels", "id": 132, "trainId": 385}, - {"name": "cap", "id": 394, "trainId": 386}, - {"name": "leaves", "id": 1424, "trainId": 387}, - {"name": "control tower", "id": 607, "trainId": 388}, - {"name": "dashboard", "id": 700, "trainId": 389}, - {"name": "bandstand", "id": 116, "trainId": 390}, - {"name": "lectern", "id": 1425, "trainId": 391}, - {"name": "switch, electric switch, electrical switch", "id": 2676, "trainId": 392}, - {"name": "baseboard, mopboard, skirting board", "id": 141, "trainId": 393}, - {"name": "shower room", "id": 2360, "trainId": 394}, - {"name": "smoke", "id": 2449, "trainId": 395}, - {"name": "faucet, spigot", "id": 897, "trainId": 396}, - {"name": "bulldozer", "id": 317, "trainId": 397}, - {"name": "saucepan", "id": 2228, "trainId": 398}, - {"name": "shops", "id": 2351, "trainId": 399}, - {"name": "meter", "id": 1543, "trainId": 400}, - {"name": "crevasse", "id": 656, "trainId": 401}, - {"name": "gear", "id": 1088, "trainId": 402}, - {"name": "candelabrum, candelabra", "id": 373, "trainId": 403}, - {"name": "sofa bed", "id": 2472, "trainId": 404}, - {"name": "tunnel", "id": 2892, "trainId": 405}, - {"name": "pallet", "id": 1740, "trainId": 406}, - {"name": "wire, conducting wire", "id": 3067, "trainId": 407}, - {"name": "kettle, boiler", "id": 1367, "trainId": 408}, - {"name": "bidet", "id": 188, "trainId": 409}, - { - "name": "baby buggy, baby carriage, carriage, perambulator, pram, stroller, go-cart, pushchair, pusher", - "id": 79, - "trainId": 410, - }, - {"name": "music 
stand", "id": 1633, "trainId": 411}, - {"name": "pipe, tube", "id": 1885, "trainId": 412}, - {"name": "cup", "id": 677, "trainId": 413}, - {"name": "parking meter", "id": 1779, "trainId": 414}, - {"name": "ice hockey rink", "id": 1297, "trainId": 415}, - {"name": "shelter", "id": 2334, "trainId": 416}, - {"name": "weeds", "id": 3027, "trainId": 417}, - {"name": "temple", "id": 2735, "trainId": 418}, - {"name": "patty, cake", "id": 1791, "trainId": 419}, - {"name": "ski slope", "id": 2405, "trainId": 420}, - {"name": "panel", "id": 1748, "trainId": 421}, - {"name": "wallet", "id": 2983, "trainId": 422}, - {"name": "wheel", "id": 3035, "trainId": 423}, - {"name": "towel rack, towel horse", "id": 2824, "trainId": 424}, - {"name": "roundabout", "id": 2168, "trainId": 425}, - {"name": "canister, cannister, tin", "id": 385, "trainId": 426}, - {"name": "rod", "id": 2148, "trainId": 427}, - {"name": "soap dispenser", "id": 2465, "trainId": 428}, - {"name": "bell", "id": 175, "trainId": 429}, - {"name": "canvas", "id": 390, "trainId": 430}, - {"name": "box office, ticket office, ticket booth", "id": 268, "trainId": 431}, - {"name": "teacup", "id": 2722, "trainId": 432}, - {"name": "trellis", "id": 2857, "trainId": 433}, - {"name": "workbench", "id": 3088, "trainId": 434}, - {"name": "valley, vale", "id": 2926, "trainId": 435}, - {"name": "toaster", "id": 2782, "trainId": 436}, - {"name": "knife", "id": 1378, "trainId": 437}, - {"name": "podium", "id": 1934, "trainId": 438}, - {"name": "ramp", "id": 2072, "trainId": 439}, - {"name": "tumble dryer", "id": 2889, "trainId": 440}, - {"name": "fireplug, fire hydrant, plug", "id": 944, "trainId": 441}, - {"name": "gym shoe, sneaker, tennis shoe", "id": 1158, "trainId": 442}, - {"name": "lab bench", "id": 1383, "trainId": 443}, - {"name": "equipment", "id": 867, "trainId": 444}, - {"name": "rocky formation", "id": 2145, "trainId": 445}, - {"name": "plastic", "id": 1915, "trainId": 446}, - {"name": "calendar", "id": 361, "trainId": 447}, - {"name": "caravan", "id": 402, "trainId": 448}, - {"name": "check-in-desk", "id": 482, "trainId": 449}, - {"name": "ticket counter", "id": 2761, "trainId": 450}, - {"name": "brush", "id": 300, "trainId": 451}, - {"name": "mill", "id": 1554, "trainId": 452}, - {"name": "covered bridge", "id": 636, "trainId": 453}, - {"name": "bowling alley", "id": 260, "trainId": 454}, - {"name": "hanger", "id": 1186, "trainId": 455}, - {"name": "excavator", "id": 871, "trainId": 456}, - {"name": "trestle", "id": 2859, "trainId": 457}, - {"name": "revolving door", "id": 2103, "trainId": 458}, - {"name": "blast furnace", "id": 208, "trainId": 459}, - {"name": "scale, weighing machine", "id": 2236, "trainId": 460}, - {"name": "projector", "id": 2012, "trainId": 461}, - {"name": "soap", "id": 2462, "trainId": 462}, - {"name": "locker", "id": 1462, "trainId": 463}, - {"name": "tractor", "id": 2832, "trainId": 464}, - {"name": "stretcher", "id": 2617, "trainId": 465}, - {"name": "frame", "id": 1024, "trainId": 466}, - {"name": "grating", "id": 1129, "trainId": 467}, - {"name": "alembic", "id": 18, "trainId": 468}, - {"name": "candle, taper, wax light", "id": 376, "trainId": 469}, - {"name": "barrier", "id": 134, "trainId": 470}, - {"name": "cardboard", "id": 407, "trainId": 471}, - {"name": "cave", "id": 434, "trainId": 472}, - {"name": "puddle", "id": 2017, "trainId": 473}, - {"name": "tarp", "id": 2717, "trainId": 474}, - {"name": "price tag", "id": 2005, "trainId": 475}, - {"name": "watchtower", "id": 2993, "trainId": 476}, - {"name": 
"meters", "id": 1545, "trainId": 477}, - { - "name": "light bulb, lightbulb, bulb, incandescent lamp, electric light, electric-light bulb", - "id": 1445, - "trainId": 478, - }, - {"name": "tracks", "id": 2831, "trainId": 479}, - {"name": "hair dryer", "id": 1161, "trainId": 480}, - {"name": "skirt", "id": 2411, "trainId": 481}, - {"name": "viaduct", "id": 2949, "trainId": 482}, - {"name": "paper towel", "id": 1769, "trainId": 483}, - {"name": "coat", "id": 552, "trainId": 484}, - {"name": "sheet", "id": 2327, "trainId": 485}, - {"name": "fire extinguisher, extinguisher, asphyxiator", "id": 939, "trainId": 486}, - {"name": "water wheel", "id": 3013, "trainId": 487}, - {"name": "pottery, clayware", "id": 1986, "trainId": 488}, - {"name": "magazine rack", "id": 1486, "trainId": 489}, - {"name": "teapot", "id": 2723, "trainId": 490}, - {"name": "microphone, mike", "id": 1549, "trainId": 491}, - {"name": "support", "id": 2649, "trainId": 492}, - {"name": "forklift", "id": 1020, "trainId": 493}, - {"name": "canyon", "id": 392, "trainId": 494}, - {"name": "cash register, register", "id": 422, "trainId": 495}, - {"name": "leaf, leafage, foliage", "id": 1419, "trainId": 496}, - {"name": "remote control, remote", "id": 2099, "trainId": 497}, - {"name": "soap dish", "id": 2464, "trainId": 498}, - {"name": "windshield, windscreen", "id": 3058, "trainId": 499}, - {"name": "cat", "id": 430, "trainId": 500}, - {"name": "cue, cue stick, pool cue, pool stick", "id": 675, "trainId": 501}, - {"name": "vent, venthole, vent-hole, blowhole", "id": 2941, "trainId": 502}, - {"name": "videos", "id": 2955, "trainId": 503}, - {"name": "shovel", "id": 2355, "trainId": 504}, - {"name": "eaves", "id": 840, "trainId": 505}, - {"name": "antenna, aerial, transmitting aerial", "id": 32, "trainId": 506}, - {"name": "shipyard", "id": 2338, "trainId": 507}, - {"name": "hen, biddy", "id": 1232, "trainId": 508}, - {"name": "traffic cone", "id": 2834, "trainId": 509}, - {"name": "washing machines", "id": 2991, "trainId": 510}, - {"name": "truck crane", "id": 2879, "trainId": 511}, - {"name": "cds", "id": 444, "trainId": 512}, - {"name": "niche", "id": 1657, "trainId": 513}, - {"name": "scoreboard", "id": 2246, "trainId": 514}, - {"name": "briefcase", "id": 296, "trainId": 515}, - {"name": "boot", "id": 245, "trainId": 516}, - {"name": "sweater, jumper", "id": 2661, "trainId": 517}, - {"name": "hay", "id": 1202, "trainId": 518}, - {"name": "pack", "id": 1714, "trainId": 519}, - {"name": "bottle rack", "id": 251, "trainId": 520}, - {"name": "glacier", "id": 1095, "trainId": 521}, - {"name": "pergola", "id": 1828, "trainId": 522}, - {"name": "building materials", "id": 311, "trainId": 523}, - {"name": "television camera", "id": 2732, "trainId": 524}, - {"name": "first floor", "id": 947, "trainId": 525}, - {"name": "rifle", "id": 2115, "trainId": 526}, - {"name": "tennis table", "id": 2738, "trainId": 527}, - {"name": "stadium", "id": 2525, "trainId": 528}, - {"name": "safety belt", "id": 2194, "trainId": 529}, - {"name": "cover", "id": 634, "trainId": 530}, - {"name": "dish rack", "id": 740, "trainId": 531}, - {"name": "synthesizer", "id": 2682, "trainId": 532}, - {"name": "pumpkin", "id": 2020, "trainId": 533}, - {"name": "gutter", "id": 1156, "trainId": 534}, - {"name": "fruit stand", "id": 1036, "trainId": 535}, - {"name": "ice floe, floe", "id": 1295, "trainId": 536}, - {"name": "handle, grip, handgrip, hold", "id": 1181, "trainId": 537}, - {"name": "wheelchair", "id": 3037, "trainId": 538}, - {"name": "mousepad, mouse mat", 
"id": 1614, "trainId": 539}, - {"name": "diploma", "id": 736, "trainId": 540}, - {"name": "fairground ride", "id": 893, "trainId": 541}, - {"name": "radio", "id": 2047, "trainId": 542}, - {"name": "hotplate", "id": 1274, "trainId": 543}, - {"name": "junk", "id": 1361, "trainId": 544}, - {"name": "wheelbarrow", "id": 3036, "trainId": 545}, - {"name": "stream", "id": 2606, "trainId": 546}, - {"name": "toll plaza", "id": 2797, "trainId": 547}, - {"name": "punching bag", "id": 2022, "trainId": 548}, - {"name": "trough", "id": 2876, "trainId": 549}, - {"name": "throne", "id": 2758, "trainId": 550}, - {"name": "chair desk", "id": 472, "trainId": 551}, - {"name": "weighbridge", "id": 3028, "trainId": 552}, - {"name": "extractor fan", "id": 882, "trainId": 553}, - {"name": "hanging clothes", "id": 1189, "trainId": 554}, - {"name": "dish, dish aerial, dish antenna, saucer", "id": 743, "trainId": 555}, - {"name": "alarm clock, alarm", "id": 3122, "trainId": 556}, - {"name": "ski lift", "id": 2401, "trainId": 557}, - {"name": "chain", "id": 468, "trainId": 558}, - {"name": "garage", "id": 1061, "trainId": 559}, - {"name": "mechanical shovel", "id": 1523, "trainId": 560}, - {"name": "wine rack", "id": 3059, "trainId": 561}, - {"name": "tramway", "id": 2843, "trainId": 562}, - {"name": "treadmill", "id": 2853, "trainId": 563}, - {"name": "menu", "id": 1529, "trainId": 564}, - {"name": "block", "id": 214, "trainId": 565}, - {"name": "well", "id": 3032, "trainId": 566}, - {"name": "witness stand", "id": 3071, "trainId": 567}, - {"name": "branch", "id": 277, "trainId": 568}, - {"name": "duck", "id": 813, "trainId": 569}, - {"name": "casserole", "id": 426, "trainId": 570}, - {"name": "frying pan", "id": 1039, "trainId": 571}, - {"name": "desk organizer", "id": 727, "trainId": 572}, - {"name": "mast", "id": 1508, "trainId": 573}, - {"name": "spectacles, specs, eyeglasses, glasses", "id": 2490, "trainId": 574}, - {"name": "service elevator", "id": 2299, "trainId": 575}, - {"name": "dollhouse", "id": 768, "trainId": 576}, - {"name": "hammock", "id": 1172, "trainId": 577}, - {"name": "clothes hanging", "id": 537, "trainId": 578}, - {"name": "photocopier", "id": 1847, "trainId": 579}, - {"name": "notepad", "id": 1664, "trainId": 580}, - {"name": "golf cart", "id": 1110, "trainId": 581}, - {"name": "footpath", "id": 1014, "trainId": 582}, - {"name": "cross", "id": 662, "trainId": 583}, - {"name": "baptismal font", "id": 121, "trainId": 584}, - {"name": "boiler", "id": 227, "trainId": 585}, - {"name": "skip", "id": 2410, "trainId": 586}, - {"name": "rotisserie", "id": 2165, "trainId": 587}, - {"name": "tables", "id": 2696, "trainId": 588}, - {"name": "water mill", "id": 3005, "trainId": 589}, - {"name": "helmet", "id": 1231, "trainId": 590}, - {"name": "cover curtain", "id": 635, "trainId": 591}, - {"name": "brick", "id": 292, "trainId": 592}, - {"name": "table runner", "id": 2690, "trainId": 593}, - {"name": "ashtray", "id": 65, "trainId": 594}, - {"name": "street box", "id": 2607, "trainId": 595}, - {"name": "stick", "id": 2574, "trainId": 596}, - {"name": "hangers", "id": 1188, "trainId": 597}, - {"name": "cells", "id": 456, "trainId": 598}, - {"name": "urinal", "id": 2913, "trainId": 599}, - {"name": "centerpiece", "id": 459, "trainId": 600}, - {"name": "portable fridge", "id": 1955, "trainId": 601}, - {"name": "dvds", "id": 827, "trainId": 602}, - {"name": "golf club", "id": 1111, "trainId": 603}, - {"name": "skirting board", "id": 2412, "trainId": 604}, - {"name": "water cooler", "id": 2997, "trainId": 
605}, - {"name": "clipboard", "id": 528, "trainId": 606}, - {"name": "camera, photographic camera", "id": 366, "trainId": 607}, - {"name": "pigeonhole", "id": 1863, "trainId": 608}, - {"name": "chips", "id": 500, "trainId": 609}, - {"name": "food processor", "id": 1001, "trainId": 610}, - {"name": "post box", "id": 1958, "trainId": 611}, - {"name": "lid", "id": 1441, "trainId": 612}, - {"name": "drum", "id": 809, "trainId": 613}, - {"name": "blender", "id": 210, "trainId": 614}, - {"name": "cave entrance", "id": 435, "trainId": 615}, - {"name": "dental chair", "id": 718, "trainId": 616}, - {"name": "obelisk", "id": 1674, "trainId": 617}, - {"name": "canoe", "id": 388, "trainId": 618}, - {"name": "mobile", "id": 1572, "trainId": 619}, - {"name": "monitors", "id": 1584, "trainId": 620}, - {"name": "pool ball", "id": 1944, "trainId": 621}, - {"name": "cue rack", "id": 674, "trainId": 622}, - {"name": "baggage carts", "id": 99, "trainId": 623}, - {"name": "shore", "id": 2352, "trainId": 624}, - {"name": "fork", "id": 1019, "trainId": 625}, - {"name": "paper filer", "id": 1763, "trainId": 626}, - {"name": "bicycle rack", "id": 185, "trainId": 627}, - {"name": "coat rack", "id": 554, "trainId": 628}, - {"name": "garland", "id": 1066, "trainId": 629}, - {"name": "sports bag", "id": 2508, "trainId": 630}, - {"name": "fish tank", "id": 951, "trainId": 631}, - {"name": "towel dispenser", "id": 2822, "trainId": 632}, - {"name": "carriage", "id": 415, "trainId": 633}, - {"name": "brochure", "id": 297, "trainId": 634}, - {"name": "plaque", "id": 1914, "trainId": 635}, - {"name": "stringer", "id": 2619, "trainId": 636}, - {"name": "iron", "id": 1338, "trainId": 637}, - {"name": "spoon", "id": 2505, "trainId": 638}, - {"name": "flag pole", "id": 955, "trainId": 639}, - {"name": "toilet brush", "id": 2786, "trainId": 640}, - {"name": "book stand", "id": 238, "trainId": 641}, - {"name": "water faucet, water tap, tap, hydrant", "id": 3000, "trainId": 642}, - {"name": "ticket office", "id": 2763, "trainId": 643}, - {"name": "broom", "id": 299, "trainId": 644}, - {"name": "dvd", "id": 822, "trainId": 645}, - {"name": "ice bucket", "id": 1288, "trainId": 646}, - {"name": "carapace, shell, cuticle, shield", "id": 3101, "trainId": 647}, - {"name": "tureen", "id": 2894, "trainId": 648}, - {"name": "folders", "id": 992, "trainId": 649}, - {"name": "chess", "id": 489, "trainId": 650}, - {"name": "root", "id": 2157, "trainId": 651}, - {"name": "sewing machine", "id": 2309, "trainId": 652}, - {"name": "model", "id": 1576, "trainId": 653}, - {"name": "pen", "id": 1810, "trainId": 654}, - {"name": "violin", "id": 2964, "trainId": 655}, - {"name": "sweatshirt", "id": 2662, "trainId": 656}, - {"name": "recycling materials", "id": 2087, "trainId": 657}, - {"name": "mitten", "id": 1569, "trainId": 658}, - {"name": "chopping board, cutting board", "id": 503, "trainId": 659}, - {"name": "mask", "id": 1505, "trainId": 660}, - {"name": "log", "id": 1468, "trainId": 661}, - {"name": "mouse, computer mouse", "id": 1613, "trainId": 662}, - {"name": "grill", "id": 1138, "trainId": 663}, - {"name": "hole", "id": 1256, "trainId": 664}, - {"name": "target", "id": 2715, "trainId": 665}, - {"name": "trash bag", "id": 2846, "trainId": 666}, - {"name": "chalk", "id": 477, "trainId": 667}, - {"name": "sticks", "id": 2576, "trainId": 668}, - {"name": "balloon", "id": 108, "trainId": 669}, - {"name": "score", "id": 2245, "trainId": 670}, - {"name": "hair spray", "id": 1162, "trainId": 671}, - {"name": "roll", "id": 2149, "trainId": 672}, - 
{"name": "runner", "id": 2183, "trainId": 673}, - {"name": "engine", "id": 858, "trainId": 674}, - {"name": "inflatable glove", "id": 1324, "trainId": 675}, - {"name": "games", "id": 1055, "trainId": 676}, - {"name": "pallets", "id": 1741, "trainId": 677}, - {"name": "baskets", "id": 149, "trainId": 678}, - {"name": "coop", "id": 615, "trainId": 679}, - {"name": "dvd player", "id": 825, "trainId": 680}, - {"name": "rocking horse", "id": 2143, "trainId": 681}, - {"name": "buckets", "id": 304, "trainId": 682}, - {"name": "bread rolls", "id": 283, "trainId": 683}, - {"name": "shawl", "id": 2322, "trainId": 684}, - {"name": "watering can", "id": 3017, "trainId": 685}, - {"name": "spotlights", "id": 2510, "trainId": 686}, - {"name": "post-it", "id": 1960, "trainId": 687}, - {"name": "bowls", "id": 265, "trainId": 688}, - {"name": "security camera", "id": 2282, "trainId": 689}, - {"name": "runner cloth", "id": 2184, "trainId": 690}, - {"name": "lock", "id": 1461, "trainId": 691}, - {"name": "alarm, warning device, alarm system", "id": 3113, "trainId": 692}, - {"name": "side", "id": 2372, "trainId": 693}, - {"name": "roulette", "id": 2166, "trainId": 694}, - {"name": "bone", "id": 232, "trainId": 695}, - {"name": "cutlery", "id": 693, "trainId": 696}, - {"name": "pool balls", "id": 1945, "trainId": 697}, - {"name": "wheels", "id": 3039, "trainId": 698}, - {"name": "spice rack", "id": 2494, "trainId": 699}, - {"name": "plant pots", "id": 1908, "trainId": 700}, - {"name": "towel ring", "id": 2827, "trainId": 701}, - {"name": "bread box", "id": 280, "trainId": 702}, - {"name": "video", "id": 2950, "trainId": 703}, - {"name": "funfair", "id": 1044, "trainId": 704}, - {"name": "breads", "id": 288, "trainId": 705}, - {"name": "tripod", "id": 2863, "trainId": 706}, - {"name": "ironing board", "id": 1342, "trainId": 707}, - {"name": "skimmer", "id": 2409, "trainId": 708}, - {"name": "hollow", "id": 1258, "trainId": 709}, - {"name": "scratching post", "id": 2249, "trainId": 710}, - {"name": "tricycle", "id": 2862, "trainId": 711}, - {"name": "file box", "id": 920, "trainId": 712}, - {"name": "mountain pass", "id": 1607, "trainId": 713}, - {"name": "tombstones", "id": 2802, "trainId": 714}, - {"name": "cooker", "id": 610, "trainId": 715}, - {"name": "card game, cards", "id": 3129, "trainId": 716}, - {"name": "golf bag", "id": 1108, "trainId": 717}, - {"name": "towel paper", "id": 2823, "trainId": 718}, - {"name": "chaise lounge", "id": 476, "trainId": 719}, - {"name": "sun", "id": 2641, "trainId": 720}, - {"name": "toilet paper holder", "id": 2788, "trainId": 721}, - {"name": "rake", "id": 2070, "trainId": 722}, - {"name": "key", "id": 1368, "trainId": 723}, - {"name": "umbrella stand", "id": 2903, "trainId": 724}, - {"name": "dartboard", "id": 699, "trainId": 725}, - {"name": "transformer", "id": 2844, "trainId": 726}, - {"name": "fireplace utensils", "id": 942, "trainId": 727}, - {"name": "sweatshirts", "id": 2663, "trainId": 728}, - { - "name": "cellular telephone, cellular phone, cellphone, cell, mobile phone", - "id": 457, - "trainId": 729, - }, - {"name": "tallboy", "id": 2701, "trainId": 730}, - {"name": "stapler", "id": 2540, "trainId": 731}, - {"name": "sauna", "id": 2231, "trainId": 732}, - {"name": "test tube", "id": 2746, "trainId": 733}, - {"name": "palette", "id": 1738, "trainId": 734}, - {"name": "shopping carts", "id": 2350, "trainId": 735}, - {"name": "tools", "id": 2808, "trainId": 736}, - {"name": "push button, push, button", "id": 2025, "trainId": 737}, - {"name": "star", "id": 2541, 
"trainId": 738}, - {"name": "roof rack", "id": 2156, "trainId": 739}, - {"name": "barbed wire", "id": 126, "trainId": 740}, - {"name": "spray", "id": 2512, "trainId": 741}, - {"name": "ear", "id": 831, "trainId": 742}, - {"name": "sponge", "id": 2503, "trainId": 743}, - {"name": "racket", "id": 2039, "trainId": 744}, - {"name": "tins", "id": 2774, "trainId": 745}, - {"name": "eyeglasses", "id": 886, "trainId": 746}, - {"name": "file", "id": 919, "trainId": 747}, - {"name": "scarfs", "id": 2240, "trainId": 748}, - {"name": "sugar bowl", "id": 2636, "trainId": 749}, - {"name": "flip flop", "id": 963, "trainId": 750}, - {"name": "headstones", "id": 1218, "trainId": 751}, - {"name": "laptop bag", "id": 1406, "trainId": 752}, - {"name": "leash", "id": 1420, "trainId": 753}, - {"name": "climbing frame", "id": 526, "trainId": 754}, - {"name": "suit hanger", "id": 2639, "trainId": 755}, - {"name": "floor spotlight", "id": 975, "trainId": 756}, - {"name": "plate rack", "id": 1921, "trainId": 757}, - {"name": "sewer", "id": 2305, "trainId": 758}, - {"name": "hard drive", "id": 1193, "trainId": 759}, - {"name": "sprinkler", "id": 2517, "trainId": 760}, - {"name": "tools box", "id": 2809, "trainId": 761}, - {"name": "necklace", "id": 1647, "trainId": 762}, - {"name": "bulbs", "id": 314, "trainId": 763}, - {"name": "steel industry", "id": 2560, "trainId": 764}, - {"name": "club", "id": 545, "trainId": 765}, - {"name": "jack", "id": 1345, "trainId": 766}, - {"name": "door bars", "id": 775, "trainId": 767}, - { - "name": "control panel, instrument panel, control board, board, panel", - "id": 603, - "trainId": 768, - }, - {"name": "hairbrush", "id": 1163, "trainId": 769}, - {"name": "napkin holder", "id": 1641, "trainId": 770}, - {"name": "office", "id": 1678, "trainId": 771}, - {"name": "smoke detector", "id": 2450, "trainId": 772}, - {"name": "utensils", "id": 2915, "trainId": 773}, - {"name": "apron", "id": 42, "trainId": 774}, - {"name": "scissors", "id": 2242, "trainId": 775}, - {"name": "terminal", "id": 2741, "trainId": 776}, - {"name": "grinder", "id": 1143, "trainId": 777}, - {"name": "entry phone", "id": 862, "trainId": 778}, - {"name": "newspaper stand", "id": 1654, "trainId": 779}, - {"name": "pepper shaker", "id": 1826, "trainId": 780}, - {"name": "onions", "id": 1689, "trainId": 781}, - { - "name": "central processing unit, cpu, c p u , central processor, processor, mainframe", - "id": 3124, - "trainId": 782, - }, - {"name": "tape", "id": 2710, "trainId": 783}, - {"name": "bat", "id": 152, "trainId": 784}, - {"name": "coaster", "id": 549, "trainId": 785}, - {"name": "calculator", "id": 360, "trainId": 786}, - {"name": "potatoes", "id": 1982, "trainId": 787}, - {"name": "luggage rack", "id": 1478, "trainId": 788}, - {"name": "salt", "id": 2203, "trainId": 789}, - {"name": "street number", "id": 2612, "trainId": 790}, - {"name": "viewpoint", "id": 2956, "trainId": 791}, - {"name": "sword", "id": 2681, "trainId": 792}, - {"name": "cd", "id": 437, "trainId": 793}, - {"name": "rowing machine", "id": 2171, "trainId": 794}, - {"name": "plug", "id": 1933, "trainId": 795}, - {"name": "andiron, firedog, dog, dog-iron", "id": 3110, "trainId": 796}, - {"name": "pepper", "id": 1824, "trainId": 797}, - {"name": "tongs", "id": 2803, "trainId": 798}, - {"name": "bonfire", "id": 234, "trainId": 799}, - {"name": "dog dish", "id": 764, "trainId": 800}, - {"name": "belt", "id": 177, "trainId": 801}, - {"name": "dumbbells", "id": 817, "trainId": 802}, - {"name": "videocassette recorder, vcr", "id": 3145, 
"trainId": 803}, - {"name": "hook", "id": 1262, "trainId": 804}, - {"name": "envelopes", "id": 864, "trainId": 805}, - {"name": "shower faucet", "id": 2359, "trainId": 806}, - {"name": "watch", "id": 2992, "trainId": 807}, - {"name": "padlock", "id": 1725, "trainId": 808}, - {"name": "swimming pool ladder", "id": 2667, "trainId": 809}, - {"name": "spanners", "id": 2484, "trainId": 810}, - {"name": "gravy boat", "id": 1133, "trainId": 811}, - {"name": "notice board", "id": 1667, "trainId": 812}, - {"name": "trash bags", "id": 2847, "trainId": 813}, - {"name": "fire alarm", "id": 932, "trainId": 814}, - {"name": "ladle", "id": 1392, "trainId": 815}, - {"name": "stethoscope", "id": 2573, "trainId": 816}, - {"name": "rocket", "id": 2140, "trainId": 817}, - {"name": "funnel", "id": 1046, "trainId": 818}, - {"name": "bowling pins", "id": 264, "trainId": 819}, - {"name": "valve", "id": 2927, "trainId": 820}, - {"name": "thermometer", "id": 2752, "trainId": 821}, - {"name": "cups", "id": 679, "trainId": 822}, - {"name": "spice jar", "id": 2493, "trainId": 823}, - {"name": "night light", "id": 1658, "trainId": 824}, - {"name": "soaps", "id": 2466, "trainId": 825}, - {"name": "games table", "id": 1057, "trainId": 826}, - {"name": "slotted spoon", "id": 2444, "trainId": 827}, - {"name": "reel", "id": 2093, "trainId": 828}, - {"name": "scourer", "id": 2248, "trainId": 829}, - {"name": "sleeping robe", "id": 2432, "trainId": 830}, - {"name": "desk mat", "id": 726, "trainId": 831}, - {"name": "dumbbell", "id": 816, "trainId": 832}, - {"name": "hammer", "id": 1171, "trainId": 833}, - {"name": "tie", "id": 2766, "trainId": 834}, - {"name": "typewriter", "id": 2900, "trainId": 835}, - {"name": "shaker", "id": 2313, "trainId": 836}, - {"name": "cheese dish", "id": 488, "trainId": 837}, - {"name": "sea star", "id": 2265, "trainId": 838}, - {"name": "racquet", "id": 2043, "trainId": 839}, - {"name": "butane gas cylinder", "id": 332, "trainId": 840}, - {"name": "paper weight", "id": 1771, "trainId": 841}, - {"name": "shaving brush", "id": 2320, "trainId": 842}, - {"name": "sunglasses", "id": 2646, "trainId": 843}, - {"name": "gear shift", "id": 1089, "trainId": 844}, - {"name": "towel rail", "id": 2826, "trainId": 845}, - {"name": "adding machine, totalizer, totaliser", "id": 3148, "trainId": 846}, -] - - -def _get_ade20k_full_meta(): - # Id 0 is reserved for ignore_label, we change ignore_label for 0 - # to 255 in our pre-processing, so all ids are shifted by 1. 
- stuff_ids = [k["id"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES] - assert len(stuff_ids) == 847, len(stuff_ids) - - # For semantic segmentation, this mapping maps from contiguous stuff id - # (in [0, 91], used in models) to ids in the dataset (used for processing results) - stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)} - stuff_classes = [k["name"] for k in ADE20K_SEM_SEG_FULL_CATEGORIES] - - ret = { - "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, - "stuff_classes": stuff_classes, - } - return ret - - -def register_all_ade20k_full(root): - root = os.path.join(root, "ADE20K_2021_17_01") - meta = _get_ade20k_full_meta() - for name, dirname in [("train", "training"), ("val", "validation")]: - image_dir = os.path.join(root, "images_detectron2", dirname) - gt_dir = os.path.join(root, "annotations_detectron2", dirname) - name = f"ade20k_full_sem_seg_{name}" - DatasetCatalog.register( - name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="tif", image_ext="jpg") - ) - MetadataCatalog.get(name).set( - stuff_classes=meta["stuff_classes"][:], - image_root=image_dir, - sem_seg_root=gt_dir, - evaluator_type="sem_seg", - ignore_label=65535, # NOTE: gt is saved in 16-bit TIFF images - ) - - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_all_ade20k_full(_root) diff --git a/spaces/EPFL-VILAB/MultiMAE/utils/random_erasing.py b/spaces/EPFL-VILAB/MultiMAE/utils/random_erasing.py deleted file mode 100644 index 5b76b60e45b146b3aa0783f9a85b746bef1e311c..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/utils/random_erasing.py +++ /dev/null @@ -1,103 +0,0 @@ -# -------------------------------------------------------- -# Based on timm and MAE-priv code bases -# https://github.com/rwightman/pytorch-image-models/tree/master/timm -# https://github.com/BUPT-PRIV/MAE-priv -# -------------------------------------------------------- -""" Random Erasing (Cutout) - -Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 -Copyright Zhun Zhong & Liang Zheng - -Hacked together by / Copyright 2020 Ross Wightman -""" -import math -import random - -import torch - - -def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): - # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() - # paths, flip the order so normal is run on CPU if this becomes a problem - # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 - if per_pixel: - return torch.empty(patch_size, dtype=dtype, device=device).normal_() - elif rand_color: - return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() - else: - return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) - - -class RandomErasing: - """ Randomly selects a rectangle region in an image and erases its pixels. - 'Random Erasing Data Augmentation' by Zhong et al. - See https://arxiv.org/pdf/1708.04896.pdf - - This variant of RandomErasing is intended to be applied to either a batch - or single image tensor after it has been normalized by dataset mean and std. - Args: - probability: Probability that the Random Erasing operation will be performed. - min_area: Minimum percentage of erased area wrt input image area. - max_area: Maximum percentage of erased area wrt input image area. - min_aspect: Minimum aspect ratio of erased area. 
- mode: pixel color mode, one of 'const', 'rand', or 'pixel' - 'const' - erase block is constant color of 0 for all channels - 'rand' - erase block is same per-channel random (normal) color - 'pixel' - erase block is per-pixel random (normal) color - max_count: maximum number of erasing blocks per image, area per box is scaled by count. - per-image count is randomly chosen between 1 and this value. - """ - - def __init__( - self, - probability=0.5, min_area=0.02, max_area=1 / 3, min_aspect=0.3, max_aspect=None, - mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): - self.probability = probability - self.min_area = min_area - self.max_area = max_area - max_aspect = max_aspect or 1 / min_aspect - self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) - self.min_count = min_count - self.max_count = max_count or min_count - self.num_splits = num_splits - mode = mode.lower() - self.rand_color = False - self.per_pixel = False - if mode == 'rand': - self.rand_color = True # per block random normal - elif mode == 'pixel': - self.per_pixel = True # per pixel random normal - else: - assert not mode or mode == 'const' - self.device = device - - def _erase(self, img, chan, img_h, img_w, dtype): - if random.random() > self.probability: - return - area = img_h * img_w - count = self.min_count if self.min_count == self.max_count else \ - random.randint(self.min_count, self.max_count) - for _ in range(count): - for attempt in range(10): - target_area = random.uniform(self.min_area, self.max_area) * area / count - aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) - h = int(round(math.sqrt(target_area * aspect_ratio))) - w = int(round(math.sqrt(target_area / aspect_ratio))) - if w < img_w and h < img_h: - top = random.randint(0, img_h - h) - left = random.randint(0, img_w - w) - img[:, top:top + h, left:left + w] = _get_pixels( - self.per_pixel, self.rand_color, (chan, h, w), - dtype=dtype, device=self.device) - break - - def __call__(self, input): - if len(input.size()) == 3: - self._erase(input, *input.size(), input.dtype) - else: - batch_size, chan, img_h, img_w = input.size() - # skip first slice of batch if num_splits is set (for clean portion of samples) - batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 - for i in range(batch_start, batch_size): - self._erase(input[i], chan, img_h, img_w, input.dtype) - return input diff --git a/spaces/Eddycrack864/Applio-Inference/tools/infer_cli.py b/spaces/Eddycrack864/Applio-Inference/tools/infer_cli.py deleted file mode 100644 index bbe0a53c1aac6a8f2d42613d554b2bdd07abea2d..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/tools/infer_cli.py +++ /dev/null @@ -1,67 +0,0 @@ -import argparse -import os -import sys - -now_dir = os.getcwd() -sys.path.append(now_dir) -from dotenv import load_dotenv -from scipy.io import wavfile - -from configs.config import Config -from infer.modules.vc.modules import VC - -#### -# USAGE -# -# Run this script from a terminal (or CMD on Windows); an illustrative -# invocation is sketched inside arg_parse() below. - - -def arg_parse() -> tuple: - parser = argparse.ArgumentParser() - parser.add_argument("--f0up_key", type=int, default=0) - parser.add_argument("--input_path", type=str, help="input path") - parser.add_argument("--index_path", type=str, help="index path") - parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm") - parser.add_argument("--opt_path", type=str, help="opt path") - parser.add_argument("--model_name", type=str, help="store in assets/weight_root") -
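# Example invocation (model and file names are illustrative, not from the original repo): - #   python tools/infer_cli.py --model_name MyModel.pth --input_path input.wav --opt_path output.wav --index_path added.index --f0method harvest --device cuda:0 -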
parser.add_argument("--index_rate", type=float, default=0.66, help="index rate") - parser.add_argument("--device", type=str, help="device") - parser.add_argument("--is_half", type=bool, help="use half -> True") - parser.add_argument("--filter_radius", type=int, default=3, help="filter radius") - parser.add_argument("--resample_sr", type=int, default=0, help="resample sr") - parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate") - parser.add_argument("--protect", type=float, default=0.33, help="protect") - - args = parser.parse_args() - sys.argv = sys.argv[:1] - - return args - - -def main(): - load_dotenv() - args = arg_parse() - config = Config() - config.device = args.device if args.device else config.device - config.is_half = args.is_half if args.is_half else config.is_half - vc = VC(config) - vc.get_vc(args.model_name) - _, wav_opt = vc.vc_single( - 0, - args.input_path, - args.f0up_key, - None, - args.f0method, - args.index_path, - None, - args.index_rate, - args.filter_radius, - args.resample_sr, - args.rms_mix_rate, - args.protect, - ) - wavfile.write(args.opt_path, wav_opt[0], wav_opt[1]) - - -if __name__ == "__main__": - main() diff --git a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/archs/__init__.py b/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/archs/__init__.py deleted file mode 100644 index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000 --- a/spaces/ElainaFanBoy/IRONY-Real-ESRGAN/realesrgan/archs/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import arch modules for registry -# scan all the files that end with '_arch.py' under the archs folder -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/spaces/Eriberto/whisper-to-chatGPT/README.md b/spaces/Eriberto/whisper-to-chatGPT/README.md deleted file mode 100644 index 39effef8b05b1e6f749204d6e53842b91bbb7e7c..0000000000000000000000000000000000000000 --- a/spaces/Eriberto/whisper-to-chatGPT/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Whisper to chatGPT -emoji: 👄🤖 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: Xhaheen/whisper-to-chatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Fernando22/freegpt-webui/client/css/settings.css b/spaces/Fernando22/freegpt-webui/client/css/settings.css deleted file mode 100644 index d1187148b4ee6d8db141d736926b510410cca36f..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/client/css/settings.css +++ /dev/null @@ -1,44 +0,0 @@ -.settings-container { - margin: 24px 0px 8px 0px; - justify-content: center; -} - -.settings-container span { - font-size: 0.875rem; - margin: 0; -} - -.settings-container label { - width: 24px; - height: 16px; -} - -.settings-container .field { - justify-content: space-between; -} - -.settings-container .checkbox input + label, -.settings-container .checkbox input:checked + label:after { - background: var(--colour-1); -} - -.settings-container .checkbox input + label:after, -.settings-container .checkbox input:checked + label { - 
background: var(--colour-3); -} - -.settings-container .checkbox label:after { - left: 2px; - width: 10px; - height: 10px; -} - -.settings-container .checkbox input:checked + label:after { - left: calc(100% - 2px - 10px); -} - -.settings-container .dropdown { - padding: 4px 8px; - font-size: 0.75rem; -} - diff --git a/spaces/Fisharp/starcoder-playground/static/styles.css b/spaces/Fisharp/starcoder-playground/static/styles.css deleted file mode 100644 index 7a6fe3687d95d64f8372bbd0af600f4f61b89a47..0000000000000000000000000000000000000000 --- a/spaces/Fisharp/starcoder-playground/static/styles.css +++ /dev/null @@ -1,78 +0,0 @@ -@import url('https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;600;700&display=swap'); - -h1, h2 { - font-family: 'IBM Plex Mono', sans-serif; -} - -.generating { - visibility: hidden -} - -.gradio-container { - color: black -} - -/* monospace_css */ -#q-input textarea { - font-family: 'Consolas', Courier, monospace; -} - -/* Share Button */ - -/* it was hidden directly inside the svg xml content */ -#share-btn-loading-icon { - display: none; -} - -a { - text-decoration-line: underline; - font-weight: 600; -} - -.animate-spin { - animation: spin 1s linear infinite; -} - -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} - -#share-btn-container { - display: flex; - padding-left: 0.5rem !important; - padding-right: 0.5rem !important; - background-color: #000000; - justify-content: center; - align-items: center; - border-radius: 9999px !important; - width: 15rem; -} - -#share-btn { - all: initial; - color: #ffffff; - font-weight: 600; - cursor: pointer; - font-family: 'IBM Plex Sans', sans-serif; - margin-left: 0.5rem !important; - padding-top: 0.25rem !important; - padding-bottom: 0.25rem !important; -} - -#share-btn * { - all: unset; -} - -#share-btn-container div:nth-child(-n+2) { - width: auto !important; - min-height: 0px !important; -} - -#share-btn-container .wrap { - display: none !important; -} diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/modules/F0Predictor/HarvestF0Predictor.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index 122bdbb4c736feb4a8d974eca03df71aede76f69..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-pcr/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,81 +0,0 @@ -from modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - -class HarvestF0Predictor(F0Predictor): - def __init__(self,hop_length=512,f0_min=50,f0_max=1100,sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self,f0): - ''' - Interpolate F0 values across unvoiced (zero) frames - ''' - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # note: this may be an unnecessary copy - last_value
-
-        return ip_data[:, 0], vuv_vector[:, 0]
-
-    def resize_f0(self, x, target_len):
-        source = np.array(x)
-        source[source < 0.001] = np.nan
-        target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), source)
-        res = np.nan_to_num(target)
-        return res
-
-    def compute_f0(self, wav, p_len=None):
-        if p_len is None:
-            p_len = wav.shape[0] // self.hop_length
-        f0, t = pyworld.harvest(
-            wav.astype(np.double),
-            fs=self.sampling_rate,  # pyworld expects the audio sample rate here, not the hop size
-            f0_ceil=self.f0_max,
-            f0_floor=self.f0_min,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
-    def compute_f0_uv(self, wav, p_len=None):
-        if p_len is None:
-            p_len = wav.shape[0] // self.hop_length
-        f0, t = pyworld.harvest(
-            wav.astype(np.double),
-            fs=self.sampling_rate,
-            f0_floor=self.f0_min,
-            f0_ceil=self.f0_max,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-        return self.interpolate_f0(self.resize_f0(f0, p_len))
diff --git a/spaces/GeorgeOrville/bingo/src/components/theme-toggle.tsx b/spaces/GeorgeOrville/bingo/src/components/theme-toggle.tsx
deleted file mode 100644
index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/components/theme-toggle.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { useTheme } from 'next-themes'
-
-import { Button } from '@/components/ui/button'
-import { IconMoon, IconSun } from '@/components/ui/icons'
-
-export function ThemeToggle() {
-  const { setTheme, theme } = useTheme()
-  const [_, startTransition] = React.useTransition()
-
-  // The JSX markup was lost in extraction; this is a plausible reconstruction
-  // from the imports above (Button, IconMoon, IconSun), not the verbatim original.
-  return (
-    <Button
-      variant="ghost"
-      size="icon"
-      onClick={() => {
-        startTransition(() => {
-          setTheme(theme === 'light' ? 'dark' : 'light')
-        })
-      }}
-    >
-      {theme === 'dark' ? <IconMoon /> : <IconSun />}
-      <span className="sr-only">Toggle theme</span>
-    </Button>
-  )
-}
diff --git a/spaces/GitHunter0/100_prisoners_problem_app/Home.py b/spaces/GitHunter0/100_prisoners_problem_app/Home.py
deleted file mode 100644
index 03bb5d75545c2c05412af696f081d47b69f8e731..0000000000000000000000000000000000000000
--- a/spaces/GitHunter0/100_prisoners_problem_app/Home.py
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
-#%% APP ROOT PAGE
-
-
-#%%% Packages Installation
-
-if False:
-    '''
-    #
-    # CONDA ENV
-    # Use mamba in the case of creating a new conda env from an environment.yml file
-    conda install mamba -n base -c conda-forge
-    mamba env create -f environment.yml
-    '''
-
-
-#%%% Modules Importation
-
-import streamlit as st
-import pandas as pd
-import numpy as np
-#
-import datetime, re  # deal with date/time and regex
-import plotly
-#
-import base64
-import PIL
-import requests
-import os
-import dotenv
-
-from functions.module_project_specific_functions import (
-    f_streamlit_hide_menu_and_marks,
-    f_streamlit_customize_page,
-)
-
-#%%% App Configuration
-
-#%%%% Page Configuration
-
-# set_page_config() can only be called once per app, and must be called as
-# the first Streamlit command in your script.
-st.set_page_config(
-    page_title = "100 Prisoners Game Riddle",
-    page_icon='www/100_prisoners_problem_favicon_1.jpg',  # None, ":memo:", ...
- layout='wide', # centered, wide - initial_sidebar_state='auto' # auto, expanded, collapsed -) - -#%%%% Set Environment Mode ('test' or 'production') and Load Configurations - -# load environment variables -dotenv.load_dotenv(".env", override=True, verbose=True) -# -os.getenv("ENV_MODE") -os.getenv("APP_NAME") -os.getenv("APP_FOLDER_NAME") -print(st.session_state) -# -if "config_mode" not in st.session_state: - st.session_state['config_mode'] = os.getenv("ENV_MODE") - - -#%%%% Remove Hamburger Menu and Streamlit logo - -if st.session_state['config_mode']=='production': - - f_streamlit_hide_menu_and_marks() - f_streamlit_customize_page(padding_top="0px", margin_top="0px") - - -#%%% Sidebar Layout - -title = "100 PRISONERS PROBLEM" -# st.title(title) -st.markdown(f"

    <h1 style='text-align: center;'>{title}</h1>

    ", - unsafe_allow_html=True) - -subtitle = "The Most Intriguing Riddle in Probability Theory" -st.markdown(f'''

    <h3 style='text-align: center;'>- {subtitle}</h3>

    ''', unsafe_allow_html=True) - -cols = st.columns([2,2]) - -with cols[0]: - st.image( - PIL.Image.open("www/100_prisoners_problem_1.png"), - use_column_width=False, width=410 - ) - -with cols[1]: - st.video("https://youtu.be/iSNsgj1OCLA") - - -#%% _________________________________________________________ - - diff --git a/spaces/Gladiator/gradient_dissent_bot/src/embed.py b/spaces/Gladiator/gradient_dissent_bot/src/embed.py deleted file mode 100644 index ac0db250b346e6f674fd2132242a5b951874f085..0000000000000000000000000000000000000000 --- a/spaces/Gladiator/gradient_dissent_bot/src/embed.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -from dataclasses import asdict - -import pandas as pd -from langchain.callbacks import get_openai_callback -from langchain.document_loaders import DataFrameLoader -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.text_splitter import TokenTextSplitter -from langchain.vectorstores import Chroma -from tqdm import tqdm -from wandb.integration.langchain import WandbTracer - -import wandb -from config import config - - -def get_data(artifact_name: str, total_episodes=None): - podcast_artifact = wandb.use_artifact(artifact_name, type="dataset") - podcast_artifact_dir = podcast_artifact.download(config.root_artifact_dir) - filename = artifact_name.split(":")[0].split("/")[-1] - df = pd.read_csv(os.path.join(podcast_artifact_dir, f"{filename}.csv")) - if total_episodes is not None: - df = df.iloc[:total_episodes] - return df - - -def create_embeddings(episode_df: pd.DataFrame, index: int): - # load docs into langchain format - loader = DataFrameLoader(episode_df, page_content_column="transcript") - data = loader.load() - - # split the documents - text_splitter = TokenTextSplitter.from_tiktoken_encoder(chunk_size=1000, chunk_overlap=0) - docs = text_splitter.split_documents(data) - - title = data[0].metadata["title"] - print(f"Number of documents for podcast {title}: {len(docs)}") - - # initialize embedding engine - embeddings = OpenAIEmbeddings() - - db = Chroma.from_documents( - docs, - embeddings, - persist_directory=os.path.join(config.root_data_dir / "chromadb", str(index)), - ) - db.persist() - - -if __name__ == "__main__": - # initialize wandb tracer - WandbTracer.init( - { - "project": config.project_name, - "job_type": "embed_transcripts", - "config": asdict(config), - } - ) - - # get data - df = get_data(artifact_name=config.summarized_que_data_artifact) - - # create embeddings - with get_openai_callback() as cb: - for episode in tqdm(df.iterrows(), total=len(df), desc="Embedding transcripts"): - episode_data = episode[1].to_frame().T - - create_embeddings(episode_data, index=episode[0]) - - print("*" * 25) - print(cb) - print("*" * 25) - - wandb.log( - { - "total_prompt_tokens": cb.prompt_tokens, - "total_completion_tokens": cb.completion_tokens, - "total_tokens": cb.total_tokens, - "total_cost": cb.total_cost, - } - ) - - # log embeddings to wandb artifact - artifact = wandb.Artifact("transcript_embeddings", type="dataset") - artifact.add_dir(config.root_data_dir / "chromadb") - wandb.log_artifact(artifact) - - WandbTracer.finish() diff --git a/spaces/Godrose0728/Aisound02/text/mandarin.py b/spaces/Godrose0728/Aisound02/text/mandarin.py deleted file mode 100644 index ff71de9788e4f20c897b971a775d1ecfbfe1c7b7..0000000000000000000000000000000000000000 --- a/spaces/Godrose0728/Aisound02/text/mandarin.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import 
cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), - ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - 
('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text diff --git a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/bias_act.cpp b/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/bias_act.cpp deleted file mode 100644 index 5d2425d8054991a8e8b6f7a940fd0ff7fa0bb330..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/bias_act.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. 
Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <torch/extension.h>
-#include <ATen/cuda/CUDAContext.h>
-#include <c10/cuda/CUDAGuard.h>
-#include "bias_act.h"
-
-//------------------------------------------------------------------------
-
-static bool has_same_layout(torch::Tensor x, torch::Tensor y)
-{
-    if (x.dim() != y.dim())
-        return false;
-    for (int64_t i = 0; i < x.dim(); i++)
-    {
-        if (x.size(i) != y.size(i))
-            return false;
-        if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
-            return false;
-    }
-    return true;
-}
-
-//------------------------------------------------------------------------
-
-static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
-{
-    // Validate arguments.
-    TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
-    TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
-    TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
-    TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
-    TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same shape, dtype, and device as x");
-    TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
-    TORCH_CHECK(b.dim() == 1, "b must have rank 1");
-    TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
-    TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
-    TORCH_CHECK(grad >= 0, "grad must be non-negative");
-
-    // Validate layout.
-    TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
-    TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
-    TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
-    TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
-    TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");
-
-    // Create output tensor.
-    const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
-    torch::Tensor y = torch::empty_like(x);
-    TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");
-
-    // Initialize CUDA kernel parameters.
-    bias_act_kernel_params p;
-    p.x = x.data_ptr();
-    p.b = (b.numel()) ? b.data_ptr() : NULL;
-    p.xref = (xref.numel()) ? xref.data_ptr() : NULL;
-    p.yref = (yref.numel()) ? yref.data_ptr() : NULL;
-    p.dy = (dy.numel()) ? dy.data_ptr() : NULL;
-    p.y = y.data_ptr();
-    p.grad = grad;
-    p.act = act;
-    p.alpha = alpha;
-    p.gain = gain;
-    p.clamp = clamp;
-    p.sizeX = (int)x.numel();
-    p.sizeB = (int)b.numel();
-    p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;
-
-    // Choose CUDA kernel, instantiated per scalar type.
-    void* kernel;
-    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
-    {
-        kernel = choose_bias_act_kernel<scalar_t>(p);
-    });
-    TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");
-
-    // Launch CUDA kernel.
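-    // Each thread processes p.loopX elements, so the 1-D grid below is sized
-    // to cover all sizeX elements: gridSize * blockSize * loopX >= sizeX.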
- p.loopX = 4; - int blockSize = 4 * 32; - int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; - void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("bias_act", &bias_act); -} - -//------------------------------------------------------------------------ diff --git a/spaces/Gradio-Blocks/Gradio_YOLOv5_Det/util/pdf_opt.py b/spaces/Gradio-Blocks/Gradio_YOLOv5_Det/util/pdf_opt.py deleted file mode 100644 index 19a25245884fb127d93ab1ce4f23e30109dc96e5..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/Gradio_YOLOv5_Det/util/pdf_opt.py +++ /dev/null @@ -1,78 +0,0 @@ -# PDF management -# author: Zeng Yifu -# creation time: 2022-05-05 - -from fpdf import FPDF - - -# PDF generation class -class PDF(FPDF): - # Reference: https://pyfpdf.readthedocs.io/en/latest/Tutorial/index.html - def header(self): - # Set Chinese font - self.add_font("SimSun", "", "./fonts/SimSun.ttf", uni=True) - self.set_font("SimSun", "", 16) - # Calculate width of title and position - w = self.get_string_width(title) + 6 - self.set_x((210 - w) / 2) - # Colors of frame, background and text - self.set_draw_color(255, 255, 255) - self.set_fill_color(255, 255, 255) - self.set_text_color(0, 0, 0) - # Thickness of frame (1 mm) - # self.set_line_width(1) - # Title - self.cell(w, 9, title, 1, 1, "C", 1) - # Line break - self.ln(10) - - def footer(self): - # Position at 1.5 cm from bottom - self.set_y(-15) - # Set Chinese font - self.add_font("SimSun", "", "./fonts/SimSun.ttf", uni=True) - self.set_font("SimSun", "", 12) - # Text color in gray - self.set_text_color(128) - # Page number - self.cell(0, 10, "Page " + str(self.page_no()), 0, 0, "C") - - def chapter_title(self, num, label): - # Set Chinese font - self.add_font("SimSun", "", "./fonts/SimSun.ttf", uni=True) - self.set_font("SimSun", "", 12) - # Background color - self.set_fill_color(200, 220, 255) - # Title - # self.cell(0, 6, 'Chapter %d : %s' % (num, label), 0, 1, 'L', 1) - self.cell(0, 6, "Detection Result:", 0, 1, "L", 1) - # Line break - self.ln(4) - - def chapter_body(self, name): - - # Set Chinese font - self.add_font("SimSun", "", "./fonts/SimSun.ttf", uni=True) - self.set_font("SimSun", "", 12) - # Output justified text - self.multi_cell(0, 5, name) - # Line break - self.ln() - self.cell(0, 5, "--------------------------------------") - - def print_chapter(self, num, title, name): - self.add_page() - self.chapter_title(num, title) - self.chapter_body(name) - - -# pdf generation function -def pdf_generate(input_file, output_file, title_): - global title - - title = title_ - pdf = PDF() - pdf.set_title(title) - pdf.set_author("Zeng Yifu") - pdf.print_chapter(1, "A RUNAWAY REEF", input_file) - pdf.output(output_file) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/anime-colorization/scripts/pixel_guide_train.py b/spaces/Gradio-Blocks/anime-colorization/scripts/pixel_guide_train.py deleted file mode 100644 index 4e50dbf3bde9b74fa4fe1ab4d2b1a9da6698f3f6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/scripts/pixel_guide_train.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -Train a super-resolution model. 
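-
-Example invocation (a sketch; the directory paths and flag values here are
-assumptions for illustration, not a validated recipe -- see create_argparser()
-below for the full set of defaults):
-
-    python pixel_guide_train.py --data_dir data/targets --guide_dir data/guides \
-        --batch_size 4 --lr 1e-4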
-""" - -import argparse - -import torch.nn.functional as F - -from pixel_guide_diffusion import dist_util, logger -from pixel_guide_diffusion.image_datasets import load_data -from pixel_guide_diffusion.resample import create_named_schedule_sampler -from pixel_guide_diffusion.script_util import ( - pg_model_and_diffusion_defaults, - pg_create_model_and_diffusion, - args_to_dict, - add_dict_to_argparser, -) -from pixel_guide_diffusion.train_util import TrainLoop - - -def main(): - args = create_argparser().parse_args() - - dist_util.setup_dist() - logger.configure() - - logger.log("creating model...") - model, diffusion = pg_create_model_and_diffusion( - **args_to_dict(args, pg_model_and_diffusion_defaults().keys()) - ) - model.to(dist_util.dev()) - schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion) - - logger.log("creating data loader...") - data = load_data( - data_dir=args.data_dir, - batch_size=args.batch_size, - image_size=args.image_size, - class_cond=args.class_cond, - guide_dir=args.guide_dir, - guide_size=args.guide_size, - deterministic=True, - ) - - logger.log("training...") - TrainLoop( - model=model, - diffusion=diffusion, - data=data, - batch_size=args.batch_size, - microbatch=args.microbatch, - lr=args.lr, - ema_rate=args.ema_rate, - log_interval=args.log_interval, - save_interval=args.save_interval, - resume_checkpoint=args.resume_checkpoint, - use_fp16=args.use_fp16, - fp16_scale_growth=args.fp16_scale_growth, - schedule_sampler=schedule_sampler, - weight_decay=args.weight_decay, - lr_anneal_steps=args.lr_anneal_steps, - ).run_loop() - - -def create_argparser(): - defaults = dict( - data_dir="", - guide_dir="", - schedule_sampler="uniform", - lr=1e-4, - weight_decay=0.0, - lr_anneal_steps=0, - batch_size=1, - microbatch=-1, - ema_rate="0.9999", - log_interval=10, - save_interval=10000, - resume_checkpoint="", - use_fp16=False, - fp16_scale_growth=1e-3, - ) - defaults.update(pg_model_and_diffusion_defaults()) - parser = argparse.ArgumentParser() - add_dict_to_argparser(parser, defaults) - return parser - - -if __name__ == "__main__": - main() diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/backbones/detectors_resnet.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/backbones/detectors_resnet.py deleted file mode 100644 index 519db464493c7c7b60fc34be1d21add2235ec341..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/backbones/detectors_resnet.py +++ /dev/null @@ -1,305 +0,0 @@ -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer, constant_init - -from ..builder import BACKBONES -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottleneck(_Bottleneck): - r"""Bottleneck for the ResNet backbone in `DetectoRS - `_. - - This bottleneck allows the users to specify whether to use - SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid). - - Args: - inplanes (int): The number of input channels. - planes (int): The number of output channels before expansion. - rfp_inplanes (int, optional): The number of channels from RFP. - Default: None. If specified, an additional conv layer will be - added for ``rfp_feat``. Otherwise, the structure is the same as - base class. - sac (dict, optional): Dictionary to construct SAC. Default: None. 
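-
-    Note that when ``rfp_inplanes`` is given, ``rfp_forward`` projects the
-    incoming RFP feature with a zero-initialized 1x1 conv (see
-    ``init_weights``) and adds it to the block output before the final ReLU.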
- """ - expansion = 4 - - def __init__(self, - inplanes, - planes, - rfp_inplanes=None, - sac=None, - **kwargs): - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - assert sac is None or isinstance(sac, dict) - self.sac = sac - self.with_sac = sac is not None - if self.with_sac: - self.conv2 = build_conv_layer( - self.sac, - planes, - planes, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False) - - self.rfp_inplanes = rfp_inplanes - if self.rfp_inplanes: - self.rfp_conv = build_conv_layer( - None, - self.rfp_inplanes, - planes * self.expansion, - 1, - stride=1, - bias=True) - self.init_weights() - - def init_weights(self): - """Initialize the weights.""" - if self.rfp_inplanes: - constant_init(self.rfp_conv, 0) - - def rfp_forward(self, x, rfp_feat): - """The forward function that also takes the RFP features as input.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - if self.rfp_inplanes: - rfp_feat = self.rfp_conv(rfp_feat) - out = out + rfp_feat - - out = self.relu(out) - - return out - - -class ResLayer(nn.Sequential): - """ResLayer to build ResNet style backbone for RPF in detectoRS. - - The difference between this module and base class is that we pass - ``rfp_inplanes`` to the first block. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. Default: True - rfp_inplanes (int, optional): The number of channels from RFP. - Default: None. If specified, an additional conv layer will be - added for ``rfp_feat``. Otherwise, the structure is the same as - base class. 
- """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - rfp_inplanes=None, - **kwargs): - self.block = block - assert downsample_first, f'downsample_first={downsample_first} is ' \ - 'not supported in DetectoRS' - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - if avg_down and stride != 1: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - rfp_inplanes=rfp_inplanes, - **kwargs)) - inplanes = planes * block.expansion - for _ in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - - super(ResLayer, self).__init__(*layers) - - -@BACKBONES.register_module() -class DetectoRS_ResNet(ResNet): - """ResNet backbone for DetectoRS. - - Args: - sac (dict, optional): Dictionary to construct SAC (Switchable Atrous - Convolution). Default: None. - stage_with_sac (list): Which stage to use sac. Default: (False, False, - False, False). - rfp_inplanes (int, optional): The number of channels from RFP. - Default: None. If specified, an additional conv layer will be - added for ``rfp_feat``. Otherwise, the structure is the same as - base class. - output_img (bool): If ``True``, the input image will be inserted into - the starting position of output. Default: False. - pretrained (str, optional): The pretrained model to load. 
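-
-    Example (a sketch for illustration; the SAC and RFP settings shown here
-    are assumptions, not a validated configuration):
-
-        >>> backbone = DetectoRS_ResNet(
-        ...     depth=50,
-        ...     sac=dict(type='SAC', use_deform=False),
-        ...     stage_with_sac=(False, True, True, True),
-        ...     rfp_inplanes=256,
-        ...     output_img=True)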
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - sac=None, - stage_with_sac=(False, False, False, False), - rfp_inplanes=None, - output_img=False, - pretrained=None, - **kwargs): - self.sac = sac - self.stage_with_sac = stage_with_sac - self.rfp_inplanes = rfp_inplanes - self.output_img = output_img - self.pretrained = pretrained - super(DetectoRS_ResNet, self).__init__(**kwargs) - - self.inplanes = self.stem_channels - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = self.strides[i] - dilation = self.dilations[i] - dcn = self.dcn if self.stage_with_dcn[i] else None - sac = self.sac if self.stage_with_sac[i] else None - if self.plugins is not None: - stage_plugins = self.make_stage_plugins(self.plugins, i) - else: - stage_plugins = None - planes = self.base_channels * 2**i - res_layer = self.make_res_layer( - block=self.block, - inplanes=self.inplanes, - planes=planes, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=dcn, - sac=sac, - rfp_inplanes=rfp_inplanes if i > 0 else None, - plugins=stage_plugins) - self.inplanes = planes * self.block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.""" - return ResLayer(**kwargs) - - def forward(self, x): - """Forward function.""" - outs = list(super(DetectoRS_ResNet, self).forward(x)) - if self.output_img: - outs.insert(0, x) - return tuple(outs) - - def rfp_forward(self, x, rfp_feats): - """Forward function for RFP.""" - if self.deep_stem: - x = self.stem(x) - else: - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - rfp_feat = rfp_feats[i] if i > 0 else None - for layer in res_layer: - x = layer.rfp_forward(x, rfp_feat) - if i in self.out_indices: - outs.append(x) - return tuple(outs) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/README.md b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/README.md deleted file mode 100644 index 01ed322587c23eac095fd870fca87d2a100fa24e..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/psanet/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# PSANet: Point-wise Spatial Attention Network for Scene Parsing - -## Introduction - - - -```latex -@inproceedings{zhao2018psanet, - title={Psanet: Point-wise spatial attention network for scene parsing}, - author={Zhao, Hengshuang and Zhang, Yi and Liu, Shu and Shi, Jianping and Change Loy, Chen and Lin, Dahua and Jia, Jiaya}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - pages={267--283}, - year={2018} -} -``` - -## Results and models - -### Cityscapes - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | --------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| PSANet | R-50-D8 | 512x1024 | 40000 | 7 | 3.17 | 77.63 | 79.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117-99fac37c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_40k_cityscapes/psanet_r50-d8_512x1024_40k_cityscapes_20200606_103117.log.json) | -| PSANet | R-101-D8 | 512x1024 | 40000 | 10.5 | 2.20 | 79.14 | 80.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418-27b9cfa7.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_40k_cityscapes/psanet_r101-d8_512x1024_40k_cityscapes_20200606_001418.log.json) | -| PSANet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.40 | 77.99 | 79.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717-d5365506.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_40k_cityscapes/psanet_r50-d8_769x769_40k_cityscapes_20200530_033717.log.json) | -| PSANet | R-101-D8 | 769x769 | 40000 | 11.9 | 0.98 | 78.43 | 80.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107-997da1e6.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_40k_cityscapes/psanet_r101-d8_769x769_40k_cityscapes_20200530_035107.log.json) | -| PSANet | R-50-D8 | 512x1024 | 80000 | - | - | 77.24 | 78.69 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842-ab60a24f.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x1024_80k_cityscapes/psanet_r50-d8_512x1024_80k_cityscapes_20200606_161842.log.json) | -| PSANet | R-101-D8 | 512x1024 | 80000 | - | - | 79.31 | 80.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823-0f73a169.pth) | 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x1024_80k_cityscapes/psanet_r101-d8_512x1024_80k_cityscapes_20200606_161823.log.json) | -| PSANet | R-50-D8 | 769x769 | 80000 | - | - | 79.31 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134-fe42f49e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_769x769_80k_cityscapes/psanet_r50-d8_769x769_80k_cityscapes_20200606_225134.log.json) | -| PSANet | R-101-D8 | 769x769 | 80000 | - | - | 79.69 | 80.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550-7665827b.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_769x769_80k_cityscapes/psanet_r101-d8_769x769_80k_cityscapes_20200606_214550.log.json) | - -### ADE20K - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ----------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| PSANet | R-50-D8 | 512x512 | 80000 | 9 | 18.91 | 41.14 | 41.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141-835e4b97.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_80k_ade20k/psanet_r50-d8_512x512_80k_ade20k_20200614_144141.log.json) | -| PSANet | R-101-D8 | 512x512 | 80000 | 12.5 | 13.13 | 43.80 | 44.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117-1fab60d4.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_80k_ade20k/psanet_r101-d8_512x512_80k_ade20k_20200614_185117.log.json) | -| PSANet | R-50-D8 | 512x512 | 160000 | - | - | 41.67 | 42.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258-148077dd.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_160k_ade20k/psanet_r50-d8_512x512_160k_ade20k_20200615_161258.log.json) | -| PSANet | R-101-D8 | 512x512 | 160000 | - | - | 43.74 | 45.38 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537-dbfa564c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_160k_ade20k/psanet_r101-d8_512x512_160k_ade20k_20200615_161537.log.json) | - -### Pascal VOC 2012 + Aug - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| PSANet | R-50-D8 | 512x512 | 20000 | 6.9 | 18.24 | 76.39 | 77.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413-2f1bbaa1.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_20k_voc12aug/psanet_r50-d8_512x512_20k_voc12aug_20200617_102413.log.json) | -| PSANet | R-101-D8 | 512x512 | 20000 | 10.4 | 12.63 | 77.91 | 79.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624-946fef11.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_20k_voc12aug/psanet_r101-d8_512x512_20k_voc12aug_20200617_110624.log.json) | -| PSANet | R-50-D8 | 512x512 | 40000 | - | - | 76.30 | 77.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946-f596afb5.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r50-d8_512x512_40k_voc12aug/psanet_r50-d8_512x512_40k_voc12aug_20200613_161946.log.json) | -| PSANet | R-101-D8 | 512x512 | 40000 | - | - | 77.73 | 79.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/psanet/psanet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946-1f560f9e.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/psanet/psanet_r101-d8_512x512_40k_voc12aug/psanet_r101-d8_512x512_40k_voc12aug_20200613_161946.log.json) | diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/modules/test_rope.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/modules/test_rope.py deleted file mode 100644 index 067c6f067acbf27fb0fef5c2b812c22474c4fcd0..0000000000000000000000000000000000000000 --- 
a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/modules/test_rope.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from audiocraft.modules.rope import RotaryEmbedding -from audiocraft.modules.transformer import StreamingTransformer, set_efficient_attention_backend - - -def test_rope(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert list(xq_out.shape) == [B, T, H, C] - assert list(xk_out.shape) == [B, T, H, C] - - -def test_rope_io_dtypes(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32) - rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64) - - # Test bfloat16 inputs w/ both 32 and 64 precision rope. - xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16) - xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16) - xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16) - assert xq_out.dtype == torch.bfloat16 - xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16) - assert xq_out.dtype == torch.bfloat16 - - # Test float32 inputs w/ both 32 and 64 precision rope. - xq_32 = torch.rand((B, T, H, C)).to(torch.float32) - xk_32 = torch.rand((B, T, H, C)).to(torch.float32) - xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32) - assert xq_out.dtype == torch.float32 - xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32) - assert xq_out.dtype == torch.float32 - - -def test_transformer_with_rope(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - for pos in ['rope', 'sin_rope']: - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1, - positional_embedding=pos) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - out = tr(x) - assert list(out.shape) == list(x.shape) - - -@torch.no_grad() -def test_rope_streaming(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, causal=True, dropout=0., - custom=True, positional_embedding='rope') - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr.streaming(): - outs = [] - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr(frame)) - - out = torch.cat(outs, dim=1) - assert list(out.shape) == [3, steps, 16] - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -@torch.no_grad() -def test_rope_streaming_past_context(): - set_efficient_attention_backend('xformers') - torch.manual_seed(1234) - - for context in [None, 10]: - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=True, - dropout=0., positional_embedding='rope') - tr.eval() - - steps = 20 - x = torch.randn(3, steps, 16) - ref = tr(x) - - with tr.streaming(): - outs = [] - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr(frame)) - - out = torch.cat(outs, dim=1) - assert list(out.shape) == [3, steps, 16] - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_rope_memory_efficient(): - set_efficient_attention_backend('xformers') - 
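-    # Build two transformers with identical weights, one on the custom
-    # attention path and one on the memory-efficient path, and check that
-    # their outputs match at float32 tolerance.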
torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1, - positional_embedding='rope') - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1, - positional_embedding='rope') - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - # Check at float precision b/c this is the rope default. - assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm() - - -def test_rope_with_xpos(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C, xpos=True) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert list(xq_out.shape) == [B, T, H, C] - assert list(xk_out.shape) == [B, T, H, C] - - -def test_positional_scale(): - set_efficient_attention_backend('xformers') - B, T, H, C = 8, 75, 16, 128 - - rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0) - xq = torch.rand((B, T, H, C)) - xk = torch.rand((B, T, H, C)) - xq_out, xk_out = rope.rotate_qk(xq, xk, start=7) - - assert torch.allclose(xq, xq_out) - assert torch.allclose(xk, xk_out) diff --git a/spaces/Grezz/generate_human_motion/VQ-Trans/visualize/joints2smpl/src/prior.py b/spaces/Grezz/generate_human_motion/VQ-Trans/visualize/joints2smpl/src/prior.py deleted file mode 100644 index 7f13806dd1f6607507b0c7e5ad463b3fb0026be8..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/VQ-Trans/visualize/joints2smpl/src/prior.py +++ /dev/null @@ -1,230 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. 
-#
-# Contact: ps-license@tuebingen.mpg.de
-
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import division
-
-import sys
-import os
-
-import time
-import pickle
-
-import numpy as np
-
-import torch
-import torch.nn as nn
-
-DEFAULT_DTYPE = torch.float32
-
-
-def create_prior(prior_type, **kwargs):
-    if prior_type == 'gmm':
-        prior = MaxMixturePrior(**kwargs)
-    elif prior_type == 'l2':
-        return L2Prior(**kwargs)
-    elif prior_type == 'angle':
-        return SMPLifyAnglePrior(**kwargs)
-    elif prior_type == 'none' or prior_type is None:
-        # Don't use any pose prior
-        def no_prior(*args, **kwargs):
-            return 0.0
-        prior = no_prior
-    else:
-        raise ValueError('Prior {}'.format(prior_type) + ' is not implemented')
-    return prior
-
-
-class SMPLifyAnglePrior(nn.Module):
-    def __init__(self, dtype=torch.float32, **kwargs):
-        super(SMPLifyAnglePrior, self).__init__()
-
-        # Indices for the rotation angle of
-        # 55: left elbow, 90deg bend at -np.pi/2
-        # 58: right elbow, 90deg bend at np.pi/2
-        # 12: left knee, 90deg bend at np.pi/2
-        # 15: right knee, 90deg bend at np.pi/2
-        angle_prior_idxs = np.array([55, 58, 12, 15], dtype=np.int64)
-        angle_prior_idxs = torch.tensor(angle_prior_idxs, dtype=torch.long)
-        self.register_buffer('angle_prior_idxs', angle_prior_idxs)
-
-        angle_prior_signs = np.array([1, -1, -1, -1],
-                                     dtype=np.float32 if dtype == torch.float32
-                                     else np.float64)
-        angle_prior_signs = torch.tensor(angle_prior_signs,
-                                         dtype=dtype)
-        self.register_buffer('angle_prior_signs', angle_prior_signs)
-
-    def forward(self, pose, with_global_pose=False):
-        ''' Returns the angle prior loss for the given pose
-
-        Args:
-            pose: (Bx[23 + 1] * 3) torch tensor with the axis-angle
-            representation of the rotations of the joints of the SMPL model.
-        Kwargs:
-            with_global_pose: Whether the pose vector also contains the global
-            orientation of the SMPL model. If not then the indices must be
-            corrected.
-        Returns:
-            A size (B) tensor containing the angle prior loss for each element
-            in the batch.
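-            Concretely, the loss is exp(sign * theta)**2 for each of the four
-            hinge joints above, so rotations past the natural bending
-            direction are penalized exponentially.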
- ''' - angle_prior_idxs = self.angle_prior_idxs - (not with_global_pose) * 3 - return torch.exp(pose[:, angle_prior_idxs] * - self.angle_prior_signs).pow(2) - - -class L2Prior(nn.Module): - def __init__(self, dtype=DEFAULT_DTYPE, reduction='sum', **kwargs): - super(L2Prior, self).__init__() - - def forward(self, module_input, *args): - return torch.sum(module_input.pow(2)) - - -class MaxMixturePrior(nn.Module): - - def __init__(self, prior_folder='prior', - num_gaussians=6, dtype=DEFAULT_DTYPE, epsilon=1e-16, - use_merged=True, - **kwargs): - super(MaxMixturePrior, self).__init__() - - if dtype == DEFAULT_DTYPE: - np_dtype = np.float32 - elif dtype == torch.float64: - np_dtype = np.float64 - else: - print('Unknown float type {}, exiting!'.format(dtype)) - sys.exit(-1) - - self.num_gaussians = num_gaussians - self.epsilon = epsilon - self.use_merged = use_merged - gmm_fn = 'gmm_{:02d}.pkl'.format(num_gaussians) - - full_gmm_fn = os.path.join(prior_folder, gmm_fn) - if not os.path.exists(full_gmm_fn): - print('The path to the mixture prior "{}"'.format(full_gmm_fn) + - ' does not exist, exiting!') - sys.exit(-1) - - with open(full_gmm_fn, 'rb') as f: - gmm = pickle.load(f, encoding='latin1') - - if type(gmm) == dict: - means = gmm['means'].astype(np_dtype) - covs = gmm['covars'].astype(np_dtype) - weights = gmm['weights'].astype(np_dtype) - elif 'sklearn.mixture.gmm.GMM' in str(type(gmm)): - means = gmm.means_.astype(np_dtype) - covs = gmm.covars_.astype(np_dtype) - weights = gmm.weights_.astype(np_dtype) - else: - print('Unknown type for the prior: {}, exiting!'.format(type(gmm))) - sys.exit(-1) - - self.register_buffer('means', torch.tensor(means, dtype=dtype)) - - self.register_buffer('covs', torch.tensor(covs, dtype=dtype)) - - precisions = [np.linalg.inv(cov) for cov in covs] - precisions = np.stack(precisions).astype(np_dtype) - - self.register_buffer('precisions', - torch.tensor(precisions, dtype=dtype)) - - # The constant term: - sqrdets = np.array([(np.sqrt(np.linalg.det(c))) - for c in gmm['covars']]) - const = (2 * np.pi)**(69 / 2.) 
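-        # 69 = 23 SMPL body joints x 3 axis-angle components, i.e. the
-        # dimensionality of the pose vector this GMM models.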
- - nll_weights = np.asarray(gmm['weights'] / (const * - (sqrdets / sqrdets.min()))) - nll_weights = torch.tensor(nll_weights, dtype=dtype).unsqueeze(dim=0) - self.register_buffer('nll_weights', nll_weights) - - weights = torch.tensor(gmm['weights'], dtype=dtype).unsqueeze(dim=0) - self.register_buffer('weights', weights) - - self.register_buffer('pi_term', - torch.log(torch.tensor(2 * np.pi, dtype=dtype))) - - cov_dets = [np.log(np.linalg.det(cov.astype(np_dtype)) + epsilon) - for cov in covs] - self.register_buffer('cov_dets', - torch.tensor(cov_dets, dtype=dtype)) - - # The dimensionality of the random variable - self.random_var_dim = self.means.shape[1] - - def get_mean(self): - ''' Returns the mean of the mixture ''' - mean_pose = torch.matmul(self.weights, self.means) - return mean_pose - - def merged_log_likelihood(self, pose, betas): - diff_from_mean = pose.unsqueeze(dim=1) - self.means - - prec_diff_prod = torch.einsum('mij,bmj->bmi', - [self.precisions, diff_from_mean]) - diff_prec_quadratic = (prec_diff_prod * diff_from_mean).sum(dim=-1) - - curr_loglikelihood = 0.5 * diff_prec_quadratic - \ - torch.log(self.nll_weights) - # curr_loglikelihood = 0.5 * (self.cov_dets.unsqueeze(dim=0) + - # self.random_var_dim * self.pi_term + - # diff_prec_quadratic - # ) - torch.log(self.weights) - - min_likelihood, _ = torch.min(curr_loglikelihood, dim=1) - return min_likelihood - - def log_likelihood(self, pose, betas, *args, **kwargs): - ''' Create graph operation for negative log-likelihood calculation - ''' - likelihoods = [] - - for idx in range(self.num_gaussians): - mean = self.means[idx] - prec = self.precisions[idx] - cov = self.covs[idx] - diff_from_mean = pose - mean - - curr_loglikelihood = torch.einsum('bj,ji->bi', - [diff_from_mean, prec]) - curr_loglikelihood = torch.einsum('bi,bi->b', - [curr_loglikelihood, - diff_from_mean]) - cov_term = torch.log(torch.det(cov) + self.epsilon) - curr_loglikelihood += 0.5 * (cov_term + - self.random_var_dim * - self.pi_term) - likelihoods.append(curr_loglikelihood) - - log_likelihoods = torch.stack(likelihoods, dim=1) - min_idx = torch.argmin(log_likelihoods, dim=1) - weight_component = self.nll_weights[:, min_idx] - weight_component = -torch.log(weight_component) - - return weight_component + log_likelihoods[:, min_idx] - - def forward(self, pose, betas): - if self.use_merged: - return self.merged_log_likelihood(pose, betas) - else: - return self.log_likelihood(pose, betas) \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py deleted file mode 100644 index 09442206e19abf854f2f02754ec7c6f8bc564200..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/hubert_feature_reader.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import fairseq -import soundfile as sf -import torch.nn.functional as F - - -class HubertFeatureReader: - """ - Wrapper class to run inference on HuBERT model. - Helps extract features for a given audio file. 
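-
-    Example (a sketch; the checkpoint name and layer index are assumptions):
-
-        reader = HubertFeatureReader('hubert_base_ls960.pt', layer=6)
-        feats = reader.get_feats('utterance.wav')  # (num_frames, feat_dim) tensor on GPU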
- """ - - def __init__(self, checkpoint_path, layer, max_chunk=1600000): - ( - model, - cfg, - task, - ) = fairseq.checkpoint_utils.load_model_ensemble_and_task( - [checkpoint_path] - ) - self.model = model[0].eval().cuda() - self.task = task - self.layer = layer - self.max_chunk = max_chunk - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - assert sr == self.task.cfg.sample_rate, sr - if ref_len is not None and abs(ref_len - len(wav)) > 160: - print(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, file_path, ref_len=None): - x = self.read_audio(file_path, ref_len) - with torch.no_grad(): - x = torch.from_numpy(x).float().cuda() - if self.task.cfg.normalize: - x = F.layer_norm(x, x.shape) - x = x.view(1, -1) - - feat = [] - for start in range(0, x.size(1), self.max_chunk): - x_chunk = x[:, start: start + self.max_chunk] - feat_chunk, _ = self.model.extract_features( - source=x_chunk, - padding_mask=None, - mask=False, - output_layer=self.layer, - ) - feat.append(feat_chunk) - return torch.cat(feat, 1).squeeze(0) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/iterative_refinement_generator.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/iterative_refinement_generator.py deleted file mode 100644 index 4fb0946f499329ceb130761b59675d761df1c158..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/iterative_refinement_generator.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from collections import namedtuple - -import numpy as np -import torch -from fairseq import utils - - -DecoderOut = namedtuple( - "IterativeRefinementDecoderOut", - ["output_tokens", "output_scores", "attn", "step", "max_step", "history"], -) - - -class IterativeRefinementGenerator(object): - def __init__( - self, - tgt_dict, - models=None, - eos_penalty=0.0, - max_iter=10, - max_ratio=2, - beam_size=1, - decoding_format=None, - retain_dropout=False, - adaptive=True, - retain_history=False, - reranking=False, - ): - """ - Generates translations based on iterative refinement. - - Args: - tgt_dict: target dictionary - eos_penalty: if > 0.0, it penalized early-stopping in decoding - max_iter: maximum number of refinement iterations - max_ratio: generate sequences of maximum length ax, where x is the source length - decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'} - retain_dropout: retaining dropout in the inference - adaptive: decoding with early stop - """ - self.bos = tgt_dict.bos() - self.pad = tgt_dict.pad() - self.unk = tgt_dict.unk() - self.eos = tgt_dict.eos() - self.vocab_size = len(tgt_dict) - self.eos_penalty = eos_penalty - self.max_iter = max_iter - self.max_ratio = max_ratio - self.beam_size = beam_size - self.reranking = reranking - self.decoding_format = decoding_format - self.retain_dropout = retain_dropout - self.retain_history = retain_history - self.adaptive = adaptive - self.models = models - - def generate_batched_itr( - self, - data_itr, - maxlen_a=None, - maxlen_b=None, - cuda=False, - timer=None, - prefix_size=0, - ): - """Iterate over a batched dataset and yield individual translations. 
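-
-        Example (names are illustrative):
-            for sample_id, src, ref, hypos in gen.generate_batched_itr(itr):
-                ...  # hypos holds the refined hypotheses for this sentence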
- - Args: - maxlen_a/b: generate sequences of maximum length ax + b, - where x is the source sentence length. - cuda: use GPU for generation - timer: StopwatchMeter for timing generations. - """ - - for sample in data_itr: - if "net_input" not in sample: - continue - if timer is not None: - timer.start() - with torch.no_grad(): - hypos = self.generate( - self.models, - sample, - prefix_tokens=sample["target"][:, :prefix_size] - if prefix_size > 0 - else None, - ) - if timer is not None: - timer.stop(sample["ntokens"]) - for i, id in enumerate(sample["id"]): - # remove padding - src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad) - ref = utils.strip_pad(sample["target"][i, :], self.pad) - yield id, src, ref, hypos[i] - - @torch.no_grad() - def generate(self, models, sample, prefix_tokens=None, constraints=None): - if constraints is not None: - raise NotImplementedError( - "Constrained decoding with the IterativeRefinementGenerator is not supported" - ) - - # TODO: iterative refinement generator does not support ensemble for now. - if not self.retain_dropout: - for model in models: - model.eval() - - model, reranker = models[0], None - if self.reranking: - assert len(models) > 1, "Assuming the last checkpoint is the reranker" - assert ( - self.beam_size > 1 - ), "Reranking requires multiple translation for each example" - - reranker = models[-1] - models = models[:-1] - - if len(models) > 1 and hasattr(model, "enable_ensemble"): - assert model.allow_ensemble, "{} does not support ensembling".format( - model.__class__.__name__ - ) - model.enable_ensemble(models) - - # TODO: better encoder inputs? - src_tokens = sample["net_input"]["src_tokens"] - src_lengths = sample["net_input"]["src_lengths"] - bsz, src_len = src_tokens.size() - - # initialize - encoder_out = model.forward_encoder([src_tokens, src_lengths]) - prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens) - - if self.beam_size > 1: - assert ( - model.allow_length_beam - ), "{} does not support decoding with length beam.".format( - model.__class__.__name__ - ) - - # regenerate data based on length-beam - length_beam_order = ( - utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1) - ) - encoder_out = model.encoder.reorder_encoder_out( - encoder_out, length_beam_order - ) - prev_decoder_out = model.regenerate_length_beam( - prev_decoder_out, self.beam_size - ) - bsz = bsz * self.beam_size - - sent_idxs = torch.arange(bsz) - prev_output_tokens = prev_decoder_out.output_tokens.clone() - - if self.retain_history: - prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens]) - - finalized = [[] for _ in range(bsz)] - - def is_a_loop(x, y, s, a): - b, l_x, l_y = x.size(0), x.size(1), y.size(1) - if l_x > l_y: - y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1) - s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1) - if a is not None: - a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1) - elif l_x < l_y: - x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1) - return (x == y).all(1), y, s, a - - def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn): - cutoff = prev_out_token.ne(self.pad) - tokens = prev_out_token[cutoff] - if prev_out_score is None: - scores, score = None, None - else: - scores = prev_out_score[cutoff] - score = scores.mean() - - if prev_out_attn is None: - hypo_attn, alignment = None, None - else: - hypo_attn = prev_out_attn[cutoff] - alignment = hypo_attn.max(dim=1)[1] - return { - "steps": step, - 
"tokens": tokens, - "positional_scores": scores, - "score": score, - "hypo_attn": hypo_attn, - "alignment": alignment, - } - - for step in range(self.max_iter + 1): - - decoder_options = { - "eos_penalty": self.eos_penalty, - "max_ratio": self.max_ratio, - "decoding_format": self.decoding_format, - } - prev_decoder_out = prev_decoder_out._replace( - step=step, - max_step=self.max_iter + 1, - ) - - decoder_out = model.forward_decoder( - prev_decoder_out, encoder_out, **decoder_options - ) - - if self.adaptive: - # terminate if there is a loop - terminated, out_tokens, out_scores, out_attn = is_a_loop( - prev_output_tokens, - decoder_out.output_tokens, - decoder_out.output_scores, - decoder_out.attn, - ) - decoder_out = decoder_out._replace( - output_tokens=out_tokens, - output_scores=out_scores, - attn=out_attn, - ) - - else: - terminated = decoder_out.output_tokens.new_zeros( - decoder_out.output_tokens.size(0) - ).bool() - - if step == self.max_iter: # reach last iteration, terminate - terminated.fill_(1) - - # collect finalized sentences - finalized_idxs = sent_idxs[terminated] - finalized_tokens = decoder_out.output_tokens[terminated] - finalized_scores = decoder_out.output_scores[terminated] - finalized_attn = ( - None - if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) - else decoder_out.attn[terminated] - ) - - if self.retain_history: - finalized_history_tokens = [h[terminated] for h in decoder_out.history] - - for i in range(finalized_idxs.size(0)): - finalized[finalized_idxs[i]] = [ - finalized_hypos( - step, - finalized_tokens[i], - finalized_scores[i], - None if finalized_attn is None else finalized_attn[i], - ) - ] - - if self.retain_history: - finalized[finalized_idxs[i]][0]["history"] = [] - for j in range(len(finalized_history_tokens)): - finalized[finalized_idxs[i]][0]["history"].append( - finalized_hypos( - step, finalized_history_tokens[j][i], None, None - ) - ) - - # check if all terminated - if terminated.sum() == terminated.size(0): - break - - # for next step - not_terminated = ~terminated - prev_decoder_out = decoder_out._replace( - output_tokens=decoder_out.output_tokens[not_terminated], - output_scores=decoder_out.output_scores[not_terminated], - attn=decoder_out.attn[not_terminated] - if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0) - else None, - history=[h[not_terminated] for h in decoder_out.history] - if decoder_out.history is not None - else None, - ) - encoder_out = model.encoder.reorder_encoder_out( - encoder_out, not_terminated.nonzero(as_tuple=False).squeeze() - ) - sent_idxs = sent_idxs[not_terminated] - prev_output_tokens = prev_decoder_out.output_tokens.clone() - - if self.beam_size > 1: - if reranker is not None: - finalized = self.rerank( - reranker, finalized, [src_tokens, src_lengths], self.beam_size - ) - - # aggregate information from length beam - finalized = [ - finalized[ - np.argmax( - [ - finalized[self.beam_size * i + j][0]["score"] - for j in range(self.beam_size) - ] - ) - + self.beam_size * i - ] - for i in range(len(finalized) // self.beam_size) - ] - - return finalized - - def rerank(self, reranker, finalized, encoder_input, beam_size): - def rebuild_batch(finalized): - finalized_tokens = [f[0]["tokens"] for f in finalized] - finalized_maxlen = max(f.size(0) for f in finalized_tokens) - final_output_tokens = ( - finalized_tokens[0] - .new_zeros(len(finalized_tokens), finalized_maxlen) - .fill_(self.pad) - ) - for i, f in enumerate(finalized_tokens): - final_output_tokens[i, : f.size(0)] = f - return 
final_output_tokens - - final_output_tokens = rebuild_batch(finalized) - final_output_tokens[ - :, 0 - ] = self.eos # autoregressive model assumes starting with EOS - - reranker_encoder_out = reranker.encoder(*encoder_input) - length_beam_order = ( - utils.new_arange( - final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1) - ) - .t() - .reshape(-1) - ) - reranker_encoder_out = reranker.encoder.reorder_encoder_out( - reranker_encoder_out, length_beam_order - ) - reranking_scores = reranker.get_normalized_probs( - reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), - True, - None, - ) - reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None]) - reranking_masks = final_output_tokens[:, 1:].ne(self.pad) - reranking_scores = ( - reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1) - ) - reranking_scores = reranking_scores / reranking_masks.sum(1).type_as( - reranking_scores - ) - - for i in range(len(finalized)): - finalized[i][0]["score"] = reranking_scores[i] - - return finalized diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/composite_encoder.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/composite_encoder.py deleted file mode 100644 index 4e20fe3a833a2d87876cbec294ad2bebfba7f591..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/composite_encoder.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .fairseq_encoder import FairseqEncoder - - -class CompositeEncoder(FairseqEncoder): - """ - A wrapper around a dictionary of :class:`FairseqEncoder` objects. - - We run forward on each encoder and return a dictionary of outputs. The first - encoder's dictionary is used for initialization. - - Args: - encoders (dict): a dictionary of :class:`FairseqEncoder` objects. 
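-
-    Example (encoder instances are illustrative):
-        model_encoder = CompositeEncoder({"text": text_enc, "audio": audio_enc})
-        outputs = model_encoder(src_tokens, src_lengths)  # dict of encoder outputs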
-    """
-
-    def __init__(self, encoders):
-        super().__init__(next(iter(encoders.values())).dictionary)
-        self.encoders = encoders
-        for key in self.encoders:
-            self.add_module(key, self.encoders[key])
-
-    def forward(self, src_tokens, src_lengths):
-        """
-        Args:
-            src_tokens (LongTensor): tokens in the source language of shape
-                `(batch, src_len)`
-            src_lengths (LongTensor): lengths of each source sentence of shape
-                `(batch)`
-
-        Returns:
-            dict:
-                the outputs from each Encoder
-        """
-        encoder_out = {}
-        for key in self.encoders:
-            encoder_out[key] = self.encoders[key](src_tokens, src_lengths)
-        return encoder_out
-
-    def reorder_encoder_out(self, encoder_out, new_order):
-        """Reorder encoder output according to new_order."""
-        for key in self.encoders:
-            encoder_out[key] = self.encoders[key].reorder_encoder_out(
-                encoder_out[key], new_order
-            )
-        return encoder_out
-
-    def max_positions(self):
-        return min(self.encoders[key].max_positions() for key in self.encoders)
-
-    def upgrade_state_dict(self, state_dict):
-        for key in self.encoders:
-            self.encoders[key].upgrade_state_dict(state_dict)
-        return state_dict
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/quantization_utils.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/quantization_utils.py
deleted file mode 100644
index 11fc414c852b199b80a569bf024272535929abcc..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/quantization_utils.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-from fairseq.modules.quantization import pq, quantization_options, scalar
-from omegaconf import DictConfig
-
-
-logger = logging.getLogger(__name__)
-
-
-def quantize_model_scalar(model, model_cfg: DictConfig):
-    quant_noise_scalar = getattr(model_cfg, "quant_noise_scalar", 0) or 0
-    if quant_noise_scalar > 0:
-        # quantize_model edits the model in place
-        scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000)
-    return model
-
-
-class Quantizer(object):
-    def __init__(self, config_path, max_epoch, max_update):
-        try:
-            import yaml
-        except ImportError:
-            raise ImportError("Please install PyYAML with: pip install pyyaml")
-
-        # parse config
-        if config_path:
-            with open(config_path) as config_file:
-                config = quantization_options.parse_config_yaml(
-                    yaml.safe_load(config_file)
-                )
-        else:
-            config = quantization_options.parse_config_yaml({})
-
-        self.n_centroids_config = config["n_centroids"]
-        self.block_sizes_config = config["block_sizes"]
-        self.layers_to_quantize = config["layers_to_quantize"]
-
-        # We assume that training will run for a fixed number of epochs
-        # (or updates) and that we should train for equal durations
-        # between iterations of PQ.
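-        # For example (illustrative numbers): max_epoch=12 with three entries
-        # in layers_to_quantize yields epoch_schedule = 4 epochs per PQ stage.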
-        num_iterations = len(self.layers_to_quantize)
-        if max_epoch > 0:
-            assert max_epoch % num_iterations == 0, (
-                "for iterative PQ, --max-epoch (={}) must be evenly divisible by "
-                "len(layers_to_quantize) (={})".format(max_epoch, num_iterations)
-            )
-            self.epoch_schedule = max_epoch // num_iterations
-        else:
-            self.epoch_schedule = None
-        if max_update > 0:
-            assert max_update % num_iterations == 0, (
-                "for iterative PQ, --max-update (={}) must be evenly divisible by "
-                "len(layers_to_quantize) (={})".format(max_update, num_iterations)
-            )
-            self.update_schedule = max_update // num_iterations
-        else:
-            self.update_schedule = None
-        assert (self.epoch_schedule is not None) ^ (
-            self.update_schedule is not None
-        ), "for iterative PQ, cannot specify both --max-update and --max-epoch"
-
-        # 0 is a special value for quantization step, which will force
-        # the first call to begin_epoch() to call step()
-        self.quantization_step = 0
-
-    def set_trainer(self, trainer):
-        self.trainer = trainer
-        self.size_tracker = pq.SizeTracker(self.trainer.get_model())
-
-    def step(self):
-        """Move to the next stage of quantization."""
-        if self.quantization_step >= len(self.layers_to_quantize):
-            # Maybe we just finished the last training step or we loaded
-            # a checkpoint for an iterative PQ model which previously
-            # finished training. Either way, don't quantize again.
-            return
-
-        logger.info(
-            "quantizing model (step={}; layers_to_quantize[step]={})".format(
-                self.quantization_step, self.layers_to_quantize[self.quantization_step]
-            )
-        )
-        quantized_layers = pq.quantize_model_(
-            self.trainer.get_model(),
-            self.size_tracker,
-            self.layers_to_quantize,
-            self.block_sizes_config,
-            self.n_centroids_config,
-            step=self.quantization_step,
-        )
-        logger.info("quantized layers: {}".format(quantized_layers))
-        logger.info(self.size_tracker)
-
-        self.quantization_step += 1
-
-        # reinitialize the Trainer since model parameters have changed
-        self.trainer.reinitialize()
-
-    def begin_epoch(self, epoch):
-        """Called at the beginning of each epoch (epochs start at 1)."""
-        if (
-            (
-                self.epoch_schedule is not None
-                and epoch > 0
-                and (epoch - 1) % self.epoch_schedule == 0
-            )
-            # we always step once in the beginning, even if using
-            # update-based quantization
-            or self.quantization_step == 0
-        ):
-            self.step()
-
-    def step_update(self, num_updates):
-        """Called at the end of each step."""
-        if (
-            self.update_schedule is not None
-            and num_updates > 0
-            and num_updates % self.update_schedule == 0
-        ):
-            self.step()
-
-    def state_dict(self):
-        return {
-            "n_centroids_config": self.n_centroids_config,
-            "block_sizes_config": self.block_sizes_config,
-            "layers_to_quantize": self.layers_to_quantize,
-            "epoch_schedule": self.epoch_schedule,
-            "update_schedule": self.update_schedule,
-            "quantization_step": self.quantization_step,
-        }
-
-    def load_state_dict(self, state_dict):
-        self.n_centroids_config = state_dict["n_centroids_config"]
-        self.block_sizes_config = state_dict["block_sizes_config"]
-        self.layers_to_quantize = state_dict["layers_to_quantize"]
-        self.epoch_schedule = state_dict["epoch_schedule"]
-        self.update_schedule = state_dict["update_schedule"]
-        self.quantization_step = state_dict["quantization_step"]
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/__init__.py
deleted file mode 100644
index 5ca74d790a95a2b14d3fbb0cf9f0a9959416d305..0000000000000000000000000000000000000000
---
a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .ofa import OFAModel, ofa_base_architecture, ofa_large_architecture, ofa_huge_architecture \ No newline at end of file diff --git a/spaces/HighCWu/Style2Paints-4-Gradio/linefiller/thinning.py b/spaces/HighCWu/Style2Paints-4-Gradio/linefiller/thinning.py deleted file mode 100644 index 6efc3481a1a132f4920b7769c72f6dea7e6a147f..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/Style2Paints-4-Gradio/linefiller/thinning.py +++ /dev/null @@ -1,44 +0,0 @@ -import numpy as np -import cv2 -from numba import njit - - -@njit -def njit_thin(points, maps): - result = maps.copy() - h, w = maps.shape[:2] - for _ in range(len(points[0])): - x = points[0][_] - y = points[1][_] - if x > 0: - a = maps[x-1, y] - if a > 0: - result[x, y] = a - continue - if y > 0: - a = maps[x, y-1] - if a > 0: - result[x, y] = a - continue - if x + 1 < h: - a = maps[x+1, y] - if a > 0: - result[x, y] = a - continue - if y + 1 < w: - a = maps[x, y+1] - if a > 0: - result[x, y] = a - continue - return result - - -def thinning(fillmap, max_iter=100): - result = fillmap.copy() - for iterNum in range(max_iter): - line_points = np.where(result == 0) - if not len(line_points[0]) > 0: - break - result = njit_thin(line_points, result) - return result - diff --git a/spaces/HighCWu/starganv2vc-paddle/test_arch.py b/spaces/HighCWu/starganv2vc-paddle/test_arch.py deleted file mode 100644 index 5729474cc66b36a0ea136247b7f6fffbd62dc9dd..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/starganv2vc-paddle/test_arch.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -#coding:utf-8 - -import os -import yaml -import paddle -import click -import warnings -warnings.simplefilter('ignore') - -from munch import Munch - -from starganv2vc_paddle.models import build_model - -from starganv2vc_paddle.Utils.ASR.models import ASRCNN -from starganv2vc_paddle.Utils.JDC.model import JDCNet - - -@click.command() -@click.option('-p', '--config_path', default='Configs/config.yml', type=str) - -def main(config_path): - config = yaml.safe_load(open(config_path)) - - # load ASR model - ASR_config = config.get('ASR_config', False) - with open(ASR_config) as f: - ASR_config = yaml.safe_load(f) - ASR_model_config = ASR_config['model_params'] - ASR_model = ASRCNN(**ASR_model_config) - _ = ASR_model.eval() - - # load F0 model - F0_model = JDCNet(num_class=1, seq_len=192) - _ = F0_model.eval() - - # build model - _, model_ema = build_model(Munch(config['model_params']), F0_model, ASR_model) - - asr_input = paddle.randn([4, 80, 192]) - print('ASR model input:', asr_input.shape, 'output:', ASR_model(asr_input).shape) - mel_input = paddle.randn([4, 1, 192, 512]) - print('F0 model input:', mel_input.shape, 'output:', [t.shape for t in F0_model(mel_input)]) - - _ = [v.eval() for v in model_ema.values()] - label = paddle.to_tensor([0,1,2,3], dtype=paddle.int64) - latent_dim = model_ema.mapping_network.shared[0].weight.shape[0] - latent_style = paddle.randn([4, latent_dim]) - ref = model_ema.mapping_network(latent_style, label) - mel_input2 = paddle.randn([4, 1, 192, 512]) - style_ref = model_ema.style_encoder(mel_input2, label) - print('StyleGANv2-VC encoder inputs:', mel_input2.shape, 'output:', style_ref.shape, 'should has the same shape as the ref:', ref.shape) - f0_feat = F0_model.get_feature_GAN(mel_input) - out = model_ema.generator(mel_input, style_ref, F0=f0_feat) - print('StyleGANv2-VC inputs:', label.shape, latent_style.shape, 
mel_input.shape, 'output:', out.shape) - - paddle.save({k: v.state_dict() for k, v in model_ema.items()}, 'test_arch.pd') - file_size = os.path.getsize('test_arch.pd') / float(1024*1024) - print(f'Main models occupied {file_size:.2f} MB') - os.remove('test_arch.pd') - - return 0 - -if __name__=="__main__": - main() diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/README.md b/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/README.md deleted file mode 100644 index 7f386decd87d93bf701e2e313c7fea39d982224f..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/roberta/commonsense_qa/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Finetuning RoBERTa on Commonsense QA - -We follow a similar approach to [finetuning RACE](../README.race.md). Specifically -for each question we construct five inputs, one for each of the five candidate -answer choices. Each input is constructed by concatenating the question and -candidate answer. We then encode each input and pass the resulting "[CLS]" -representations through a fully-connected layer to predict the correct answer. -We train with a standard cross-entropy loss. - -We also found it helpful to prepend a prefix of `Q:` to the question and `A:` to -the answer. The complete input format is: -``` - Q: Where would I not want a fox? A: hen house -``` - -Our final submission is based on a hyperparameter search over the learning rate -(1e-5, 2e-5, 3e-5), batch size (8, 16), number of training steps (2000, 3000, -4000) and random seed. We selected the model with the best performance on the -development set after 100 trials. - -### 1) Download data from the Commonsense QA website (https://www.tau-nlp.org/commonsenseqa) -```bash -bash examples/roberta/commonsense_qa/download_cqa_data.sh -``` - -### 2) Finetune - -```bash -MAX_UPDATES=3000 # Number of training steps. -WARMUP_UPDATES=150 # Linearly increase LR over this many steps. -LR=1e-05 # Peak LR for polynomial LR scheduler. -MAX_SENTENCES=16 # Batch size. -SEED=1 # Random seed. -ROBERTA_PATH=/path/to/roberta/model.pt -DATA_DIR=data/CommonsenseQA - -# we use the --user-dir option to load the task from -# the examples/roberta/commonsense_qa directory: -FAIRSEQ_PATH=/path/to/fairseq -FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/commonsense_qa - -CUDA_VISIBLE_DEVICES=0 fairseq-train --fp16 --ddp-backend=legacy_ddp \ - $DATA_DIR \ - --user-dir $FAIRSEQ_USER_DIR \ - --restore-file $ROBERTA_PATH \ - --reset-optimizer --reset-dataloader --reset-meters \ - --no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --task commonsense_qa --init-token 0 --bpe gpt2 \ - --arch roberta_large --max-positions 512 \ - --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \ - --criterion sentence_ranking --num-classes 5 \ - --optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR \ - --warmup-updates $WARMUP_UPDATES --total-num-update $MAX_UPDATES \ - --batch-size $MAX_SENTENCES \ - --max-update $MAX_UPDATES \ - --log-format simple --log-interval 25 \ - --seed $SEED -``` - -The above command assumes training on 1 GPU with 32GB of RAM. For GPUs with -less memory, decrease `--batch-size` and increase `--update-freq` -accordingly to compensate. 
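-
-For example, on a 16 GB GPU the following (illustrative) values keep the
-effective batch size at 16:
-
-```bash
-MAX_SENTENCES=8 # Halved batch size.
-UPDATE_FREQ=2 # Accumulate gradients over 2 batches.
-# then pass: --batch-size $MAX_SENTENCES --update-freq $UPDATE_FREQ
-```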
- -### 3) Evaluate -```python -import json -import torch -from fairseq.models.roberta import RobertaModel -from examples.roberta import commonsense_qa # load the Commonsense QA task -roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'data/CommonsenseQA') -roberta.eval() # disable dropout -roberta.cuda() # use the GPU (optional) -nsamples, ncorrect = 0, 0 -with open('data/CommonsenseQA/valid.jsonl') as h: - for line in h: - example = json.loads(line) - scores = [] - for choice in example['question']['choices']: - input = roberta.encode( - 'Q: ' + example['question']['stem'], - 'A: ' + choice['text'], - no_separator=True - ) - score = roberta.predict('sentence_classification_head', input, return_logits=True) - scores.append(score) - pred = torch.cat(scores).argmax() - answer = ord(example['answerKey']) - ord('A') - nsamples += 1 - if pred == answer: - ncorrect += 1 - -print('Accuracy: ' + str(ncorrect / float(nsamples))) -# Accuracy: 0.7846027846027847 -``` - -The above snippet is not batched, which makes it quite slow. See [instructions -for batched prediction with RoBERTa](https://github.com/pytorch/fairseq/tree/main/examples/roberta#batched-prediction). diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/transformer_lm.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/transformer_lm.py deleted file mode 100644 index eedd5151ba5b1a7050b37639023cf8a158fae8d4..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/models/transformer_lm.py +++ /dev/null @@ -1,545 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from dataclasses import dataclass, field -from typing import Optional - -from fairseq import options, utils -from fairseq.dataclass import ChoiceEnum, FairseqDataclass -from fairseq.models import ( - FairseqLanguageModel, - register_model, - register_model_architecture, -) -from fairseq.models.transformer import ( - DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder -) -from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder -from fairseq.utils import safe_getattr, safe_hasattr -from omegaconf import II - - -DEFAULT_MAX_TARGET_POSITIONS = 1024 - - -@dataclass -class TransformerLanguageModelConfig(FairseqDataclass): - activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( - default="relu", metadata={"help": "activation function to use"} - ) - dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) - attention_dropout: float = field( - default=0.0, metadata={"help": "dropout probability for attention weights"} - ) - activation_dropout: float = field( - default=0.0, metadata={"help": "dropout probability after activation in FFN."} - ) - relu_dropout: float = field( - default=0.0, metadata={"help": "dropout probability after activation in FFN."} - ) - decoder_embed_dim: int = field( - default=512, metadata={"help": "decoder embedding dimension"} - ) - decoder_output_dim: int = field( - default=512, metadata={"help": "decoder output dimension"} - ) - decoder_input_dim: int = field( - default=512, metadata={"help": "decoder input dimension"} - ) - decoder_ffn_embed_dim: int = field( - default=2048, metadata={"help": "decoder embedding dimension for FFN"} - ) - decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"}) - decoder_attention_heads: int = field( - default=8, metadata={"help": "num decoder attention heads"} 
- ) - decoder_normalize_before: bool = field( - default=False, metadata={"help": "apply layernorm before each decoder block"} - ) - no_decoder_final_norm: bool = field( - default=False, - metadata={"help": "don't add an extra layernorm after the last decoder block"}, - ) - adaptive_softmax_cutoff: Optional[str] = field( - default=None, - metadata={ - "help": "comma separated list of adaptive softmax cutoff points. " - "Must be used with adaptive_loss criterion" - }, - ) - adaptive_softmax_dropout: float = field( - default=0, - metadata={"help": "sets adaptive softmax dropout for the tail projections"}, - ) - adaptive_softmax_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - no_token_positional_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, disables positional embeddings (outside self attention)" - }, - ) - share_decoder_input_output_embed: bool = field( - default=False, metadata={"help": "share decoder input and output embeddings"} - ) - character_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, uses character embedding convolutions to produce token embeddings" - }, - ) - character_filters: str = field( - default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]", - metadata={"help": "size of character embeddings"}, - ) - character_embedding_dim: int = field( - default=4, metadata={"help": "size of character embeddings"} - ) - char_embedder_highway_layers: int = field( - default=2, - metadata={"help": "number of highway layers for character token embeddder"}, - ) - adaptive_input: bool = field( - default=False, metadata={"help": "if set, uses adaptive input"} - ) - adaptive_input_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - adaptive_input_cutoff: Optional[str] = field( - default=None, - metadata={"help": "comma separated list of adaptive input cutoff points."}, - ) - tie_adaptive_weights: bool = field( - default=False, - metadata={ - "help": "if set, ties the weights of adaptive softmax and adaptive input" - }, - ) - tie_adaptive_proj: bool = field( - default=False, - metadata={ - "help": "if set, ties the projection weights of adaptive softmax and adaptive input" - }, - ) - decoder_learned_pos: bool = field( - default=False, - metadata={"help": "use learned positional embeddings in the decoder"}, - ) - layernorm_embedding: bool = field( - default=False, metadata={"help": "add layernorm to embedding"} - ) - no_scale_embedding: bool = field( - default=False, metadata={"help": "if True, dont scale embeddings"} - ) - checkpoint_activations: bool = field( - default=False, metadata={"help": "checkpoint activations at each layer"} - ) - offload_activations: bool = field( - default=False, - metadata={"help": "move checkpointed activations to CPU after they are used."}, - ) - # config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) - decoder_layerdrop: float = field( - default=0.0, metadata={"help": "LayerDrop probability for decoder"} - ) - decoder_layers_to_keep: Optional[str] = field( - default=None, - metadata={ - "help": "which layers to *keep* when pruning as a comma-separated list" - }, - ) - # config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) - quant_noise_pq: float = field( - default=0.0, - metadata={"help": "iterative PQ quantization noise at training time"}, - ) - quant_noise_pq_block_size: int = field( - default=8, - metadata={"help": "block size 
of quantization noise at training time"}, - ) - quant_noise_scalar: float = field( - default=0.0, - metadata={ - "help": "scalar quantization noise and scalar quantization at training time" - }, - ) - # config for Fully Sharded Data Parallel (FSDP) training - min_params_to_wrap: int = field( - default=DEFAULT_MIN_PARAMS_TO_WRAP, - metadata={ - "help": ( - "minimum number of params for a layer to be wrapped with FSDP() when " - "training with --ddp-backend=fully_sharded. Smaller values will " - "improve memory efficiency, but may make torch.distributed " - "communication less efficient due to smaller input sizes. This option " - "is set to 0 (i.e., always wrap) when --checkpoint-activations or " - "--offload-activations are passed." - ) - } - ) - # config for "BASE Layers: Simplifying Training of Large, Sparse Models" - base_layers: Optional[int] = field( - default=0, metadata={"help": "number of BASE layers in total"} - ) - base_sublayers: Optional[int] = field( - default=1, metadata={"help": "number of sublayers in each BASE layer"} - ) - base_shuffle: Optional[int] = field( - default=1, metadata={"help": "shuffle tokens between workers before computing assignment"} - ) - # options from other parts of the config - add_bos_token: bool = II("task.add_bos_token") - tokens_per_sample: int = II("task.tokens_per_sample") - max_target_positions: Optional[int] = II("task.max_target_positions") - tpu: bool = II("common.tpu") - - -@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig) -class TransformerLanguageModel(FairseqLanguageModel): - @classmethod - def hub_models(cls): - def moses_fastbpe(path): - return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"} - - def spm(path): - return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"} - - return { - "transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2", - "transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2", - "transformer_lm.wmt19.en": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2" - ), - "transformer_lm.wmt19.de": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2" - ), - "transformer_lm.wmt19.ru": moses_fastbpe( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2" - ), - "transformer_lm.wmt20.en": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz" - ), - "transformer_lm.wmt20.ta": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz" - ), - "transformer_lm.wmt20.iu.news": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz" - ), - "transformer_lm.wmt20.iu.nh": spm( - "https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz" - ), - } - - def __init__(self, decoder): - super().__init__(decoder) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - if args.decoder_layers_to_keep: - args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) - - if safe_getattr(args, "max_target_positions", None) is None: - args.max_target_positions = safe_getattr( - args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS - ) - - if args.character_embeddings: - embed_tokens = CharacterTokenEmbedder( - task.source_dictionary, - eval(args.character_filters), - args.character_embedding_dim, - args.decoder_embed_dim, - args.char_embedder_highway_layers, - ) - elif args.adaptive_input: - 
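-            # AdaptiveInput factorizes the embedding table by token frequency
-            # (Baevski & Auli, "Adaptive Input Representations for Neural
-            # Language Modeling", ICLR 2019).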
embed_tokens = AdaptiveInput( - len(task.source_dictionary), - task.source_dictionary.pad(), - args.decoder_input_dim, - args.adaptive_input_factor, - args.decoder_embed_dim, - options.eval_str_list(args.adaptive_input_cutoff, type=int), - args.quant_noise_pq, - args.quant_noise_pq_block_size, - ) - else: - embed_tokens = cls.build_embedding( - args, task.source_dictionary, args.decoder_input_dim - ) - - if args.tie_adaptive_weights: - assert args.adaptive_input - assert args.adaptive_input_factor == args.adaptive_softmax_factor - assert ( - args.adaptive_softmax_cutoff == args.adaptive_input_cutoff - ), "{} != {}".format( - args.adaptive_softmax_cutoff, args.adaptive_input_cutoff - ) - assert args.decoder_input_dim == args.decoder_output_dim - - decoder = TransformerDecoder( - args, task.target_dictionary, embed_tokens, no_encoder_attn=True - ) - return cls(decoder) - - @classmethod - def build_embedding(cls, args, dictionary, embed_dim, path=None): - embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad()) - return embed_tokens - - -def base_lm_architecture(args): - # backward compatibility for older model checkpoints - if safe_hasattr(args, "no_tie_adaptive_proj"): - # previous models defined --no-tie-adaptive-proj, so use the existence of - # that option to determine if this is an "old" model checkpoint - args.no_decoder_final_norm = True # old models always set this to True - if args.no_tie_adaptive_proj is False: - args.tie_adaptive_proj = True - if safe_hasattr(args, "decoder_final_norm"): - args.no_decoder_final_norm = not args.decoder_final_norm - - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) - - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048) - args.decoder_layers = safe_getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) - args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0) - args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4) - args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False) - args.activation_fn = safe_getattr(args, "activation_fn", "relu") - - args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0) - args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None) - args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0) - args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8) - args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0) - - args.base_layers = safe_getattr(args, "base_layers", 0) - args.base_sublayers = safe_getattr(args, "base_sublayers", 1) - args.base_shuffle = safe_getattr(args, "base_shuffle", False) - - args.add_bos_token = safe_getattr(args, "add_bos_token", False) - args.no_token_positional_embeddings = safe_getattr( - args, "no_token_positional_embeddings", False - ) - args.share_decoder_input_output_embed = safe_getattr( - args, "share_decoder_input_output_embed", False - ) - args.character_embeddings = safe_getattr(args, "character_embeddings", False) - - args.decoder_output_dim = safe_getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = safe_getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # Model 
training is not stable without this - args.decoder_normalize_before = True - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False) - - args.adaptive_input = safe_getattr(args, "adaptive_input", False) - args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4) - args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None) - - args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False) - args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False) - - args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False) - args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False) - args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False) - args.offload_activations = safe_getattr(args, "offload_activations", False) - if args.offload_activations: - args.checkpoint_activations = True - - -@register_model_architecture("transformer_lm", "transformer_lm_big") -def transformer_lm_big(args): - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_wiki103") -@register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103") -def transformer_lm_baevski_wiki103(args): - args.decoder_layers = safe_getattr(args, "decoder_layers", 16) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8) - args.dropout = safe_getattr(args, "dropout", 0.3) - args.adaptive_input = safe_getattr(args, "adaptive_input", True) - args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True) - args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", "20000,60000") - args.adaptive_softmax_cutoff = safe_getattr( - args, "adaptive_softmax_cutoff", "20000,60000" - ) - args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1) - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) - args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True) - transformer_lm_big(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gbw") -@register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw") -def transformer_lm_baevski_gbw(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True) - transformer_lm_big(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt") -def transformer_lm_gpt(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072) - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - 
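-    # GPT-style models use GELU rather than fairseq's default ReLU activation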
args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_small") -def transformer_lm_gpt2_small(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096) - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny") -def transformer_lm_gpt2_tiny(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64) - args.decoder_layers = safe_getattr(args, "decoder_layers", 2) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium") -def transformer_lm_gpt2_medium(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120) - args.decoder_layers = safe_getattr(args, "decoder_layers", 36) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt2_big") -def transformer_lm_gpt2_big(args): - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600) - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400) - args.decoder_layers = safe_getattr(args, "decoder_layers", 48) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25) - args.dropout = safe_getattr(args, "dropout", 0.1) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - base_lm_architecture(args) - - -def base_gpt3_architecture(args): - args.decoder_input_dim = args.decoder_embed_dim - args.decoder_output_dim = args.decoder_embed_dim - args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4) - # GPT-3 used learned positional embeddings, rather than sinusoidal - args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True) - args.dropout = safe_getattr(args, "dropout", 0.0) - args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0) - args.activation_fn = safe_getattr(args, "activation_fn", "gelu") - args.share_decoder_input_output_embed = True - base_lm_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_small") -def transformer_lm_gpt3_small(args): - # 125M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 12) - args.decoder_embed_dim = safe_getattr(args, 
"decoder_embed_dim", 768) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium") -def transformer_lm_gpt3_medium(args): - # 350M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_large") -def transformer_lm_gpt3_large(args): - # 760M params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl") -def transformer_lm_gpt3_xl(args): - # 1.3B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 24) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7") -def transformer_lm_gpt3_2_7(args): - # 2.7B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 32) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7") -def transformer_lm_gpt3_6_7(args): - # 6.7B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 32) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_13") -def transformer_lm_gpt3_13(args): - # 13B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 40) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40) - base_gpt3_architecture(args) - - -@register_model_architecture("transformer_lm", "transformer_lm_gpt3_175") -def transformer_lm_gpt3_175(args): - # 175B params - args.decoder_layers = safe_getattr(args, "decoder_layers", 96) - args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288) - args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96) - base_gpt3_architecture(args) diff --git a/spaces/Illumotion/Koboldcpp/include/CL/cl_va_api_media_sharing_intel.h b/spaces/Illumotion/Koboldcpp/include/CL/cl_va_api_media_sharing_intel.h deleted file mode 100644 index 547e90e889f15053711d04cc30bfd2ce959d1014..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/include/CL/cl_va_api_media_sharing_intel.h +++ /dev/null @@ -1,163 +0,0 @@ -/******************************************************************************* - * Copyright (c) 2008-2020 The Khronos Group Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - ******************************************************************************/ - -#ifndef __OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H -#define __OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/*************************************************************** -* cl_intel_sharing_format_query_va_api -***************************************************************/ -#define cl_intel_sharing_format_query_va_api 1 - -/* when cl_intel_va_api_media_sharing is supported */ - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetSupportedVA_APIMediaSurfaceFormatsINTEL( - cl_context context, - cl_mem_flags flags, - cl_mem_object_type image_type, - cl_uint plane, - cl_uint num_entries, - VAImageFormat* va_api_formats, - cl_uint* num_surface_formats) ; - -typedef cl_int (CL_API_CALL * -clGetSupportedVA_APIMediaSurfaceFormatsINTEL_fn)( - cl_context context, - cl_mem_flags flags, - cl_mem_object_type image_type, - cl_uint plane, - cl_uint num_entries, - VAImageFormat* va_api_formats, - cl_uint* num_surface_formats) ; - -/****************************************** -* cl_intel_va_api_media_sharing extension * -*******************************************/ - -#define cl_intel_va_api_media_sharing 1 - -/* error codes */ -#define CL_INVALID_VA_API_MEDIA_ADAPTER_INTEL -1098 -#define CL_INVALID_VA_API_MEDIA_SURFACE_INTEL -1099 -#define CL_VA_API_MEDIA_SURFACE_ALREADY_ACQUIRED_INTEL -1100 -#define CL_VA_API_MEDIA_SURFACE_NOT_ACQUIRED_INTEL -1101 - -/* cl_va_api_device_source_intel */ -#define CL_VA_API_DISPLAY_INTEL 0x4094 - -/* cl_va_api_device_set_intel */ -#define CL_PREFERRED_DEVICES_FOR_VA_API_INTEL 0x4095 -#define CL_ALL_DEVICES_FOR_VA_API_INTEL 0x4096 - -/* cl_context_info */ -#define CL_CONTEXT_VA_API_DISPLAY_INTEL 0x4097 - -/* cl_mem_info */ -#define CL_MEM_VA_API_MEDIA_SURFACE_INTEL 0x4098 - -/* cl_image_info */ -#define CL_IMAGE_VA_API_PLANE_INTEL 0x4099 - -/* cl_command_type */ -#define CL_COMMAND_ACQUIRE_VA_API_MEDIA_SURFACES_INTEL 0x409A -#define CL_COMMAND_RELEASE_VA_API_MEDIA_SURFACES_INTEL 0x409B - -typedef cl_uint cl_va_api_device_source_intel; -typedef cl_uint cl_va_api_device_set_intel; - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetDeviceIDsFromVA_APIMediaAdapterINTEL( - cl_platform_id platform, - cl_va_api_device_source_intel media_adapter_type, - void* media_adapter, - cl_va_api_device_set_intel media_adapter_set, - cl_uint num_entries, - cl_device_id* devices, - cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_int (CL_API_CALL * clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn)( - cl_platform_id platform, - cl_va_api_device_source_intel media_adapter_type, - void* media_adapter, - cl_va_api_device_set_intel media_adapter_set, - cl_uint num_entries, - cl_device_id* devices, - cl_uint* num_devices) CL_API_SUFFIX__VERSION_1_2; - -extern CL_API_ENTRY cl_mem CL_API_CALL -clCreateFromVA_APIMediaSurfaceINTEL( - cl_context context, - cl_mem_flags flags, - VASurfaceID* surface, - cl_uint plane, - cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2; - -typedef cl_mem (CL_API_CALL * 
clCreateFromVA_APIMediaSurfaceINTEL_fn)(
-    cl_context context,
-    cl_mem_flags flags,
-    VASurfaceID* surface,
-    cl_uint plane,
-    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
-
-extern CL_API_ENTRY cl_int CL_API_CALL
-clEnqueueAcquireVA_APIMediaSurfacesINTEL(
-    cl_command_queue command_queue,
-    cl_uint num_objects,
-    const cl_mem* mem_objects,
-    cl_uint num_events_in_wait_list,
-    const cl_event* event_wait_list,
-    cl_event* event) CL_API_SUFFIX__VERSION_1_2;
-
-typedef cl_int (CL_API_CALL *clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn)(
-    cl_command_queue command_queue,
-    cl_uint num_objects,
-    const cl_mem* mem_objects,
-    cl_uint num_events_in_wait_list,
-    const cl_event* event_wait_list,
-    cl_event* event) CL_API_SUFFIX__VERSION_1_2;
-
-extern CL_API_ENTRY cl_int CL_API_CALL
-clEnqueueReleaseVA_APIMediaSurfacesINTEL(
-    cl_command_queue command_queue,
-    cl_uint num_objects,
-    const cl_mem* mem_objects,
-    cl_uint num_events_in_wait_list,
-    const cl_event* event_wait_list,
-    cl_event* event) CL_API_SUFFIX__VERSION_1_2;
-
-typedef cl_int (CL_API_CALL *clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn)(
-    cl_command_queue command_queue,
-    cl_uint num_objects,
-    const cl_mem* mem_objects,
-    cl_uint num_events_in_wait_list,
-    const cl_event* event_wait_list,
-    cl_event* event) CL_API_SUFFIX__VERSION_1_2;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  /* __OPENCL_CL_VA_API_MEDIA_SHARING_INTEL_H */
-
diff --git a/spaces/Illumotion/Koboldcpp/tests/test-tokenizer-1-llama.cpp b/spaces/Illumotion/Koboldcpp/tests/test-tokenizer-1-llama.cpp
deleted file mode 100644
index 3b2fc87ac48d8cbb5726c8ea02a5e6ae376a3def..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/tests/test-tokenizer-1-llama.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-#include "llama.h"
-#include "common.h"
-#include "console.h"
-
-#include <cassert>
-#include <codecvt>
-#include <cstdio>
-#include <cstring>
-#include <locale>
-#include <map>
-#include <string>
-#include <vector>
-
-typedef int codepoint;
-
-static std::string codepoint_to_utf8(codepoint cp) {
-    std::string result;
-    if (0x00 <= cp && cp <= 0x7f) {
-        result.push_back(cp);
-    } else if (0x80 <= cp && cp <= 0x7ff) {
-        result.push_back(0xc0 | ((cp >> 6) & 0x1f));
-        result.push_back(0x80 | (cp & 0x3f));
-    } else if (0x800 <= cp && cp <= 0xffff) {
-        result.push_back(0xe0 | ((cp >> 12) & 0x0f));
-        result.push_back(0x80 | ((cp >> 6) & 0x3f));
-        result.push_back(0x80 | (cp & 0x3f));
-    } else if (0x10000 <= cp && cp <= 0x10ffff) {
-        result.push_back(0xf0 | ((cp >> 18) & 0x07));
-        result.push_back(0x80 | ((cp >> 12) & 0x3f));
-        result.push_back(0x80 | ((cp >> 6) & 0x3f));
-        result.push_back(0x80 | (cp & 0x3f));
-    } else {
-        throw std::invalid_argument("invalid codepoint");
-    }
-    return result;
-}
-
-int main(int argc, char **argv) {
-    if (argc < 2) {
-        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
-        return 1;
-    }
-
-    const std::string fname = argv[1];
-
-    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
-
-    llama_model * model;
-    llama_context * ctx;
-
-    llama_backend_init(false);
-
-    // load the vocab
-    {
-        auto mparams = llama_model_default_params();
-
-        mparams.vocab_only = true;
-
-        model = llama_load_model_from_file(fname.c_str(), mparams);
-
-        if (model == NULL) {
-            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            return 1;
-        }
-
-        auto cparams = llama_context_default_params();
-
-        ctx = llama_new_context_with_model(model, cparams);
-
-        if (ctx == NULL) {
-            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
-            return 1;
-        }
-    }
-
-    GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
-
-#ifdef _WIN32
-    // We need this for unicode console support
-    console::init(false, false);
-    atexit([]() { console::cleanup(); });
-#endif
-
-    const int n_vocab = llama_n_vocab(model);
-
-    for (int i = 0; i < n_vocab; ++i) {
-        std::string str = llama_detokenize_spm(ctx, std::vector<llama_token>(1, i));
-        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-        std::string check = llama_detokenize_spm(ctx, tokens);
-        if (check != str) {
-            fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
-                __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
-            return 2;
-        }
-    }
-
-    for (codepoint cp = 0x0000; cp < 0xffff; ++cp) {
-        if (cp < 0xd800 || cp > 0xdfff) {
-            std::string str = codepoint_to_utf8(cp);
-            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-            std::string check = llama_detokenize_spm(ctx, tokens);
-            if (cp != 9601 && str != check) {
-                fprintf(stderr, "%s : error: codepoint %d detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
-                    __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
-                return 3;
-            }
-        }
-    }
-    for (codepoint cp = 0x10000; cp < 0x0010ffff; ++cp) {
-        std::string str = codepoint_to_utf8(cp);
-        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
-        std::string check = llama_detokenize_spm(ctx, tokens);
-        if (str != check) {
-            fprintf(stderr, "%s : error: codepoint %d detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
-                __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
-            return 4;
-        }
-    }
-
-    llama_free_model(model);
-    llama_free(ctx);
-
-    llama_backend_free();
-
-    return 0;
-}
diff --git a/spaces/JUNGU/Talk2Carnegie/app.py b/spaces/JUNGU/Talk2Carnegie/app.py
deleted file mode 100644
index 1f6d7fba1782c1af70e945c31891fe5ece19d799..0000000000000000000000000000000000000000
--- a/spaces/JUNGU/Talk2Carnegie/app.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.document_loaders import PyPDFLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.embeddings.cohere import CohereEmbeddings
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
-from langchain.vectorstores import Chroma
-from PyPDF2 import PdfWriter
-import gradio as gr
-import os
-from dotenv import load_dotenv
-import openai
-
-load_dotenv()
-
-os.environ["OPENAI_API_KEY"] = os.environ['my_secret']
-
-loader = PyPDFLoader("/home/user/app/docs.pdf")
-documents = loader.load()
-
-text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=0)
-texts = text_splitter.split_documents(documents)
-
-#vector embedding
-embeddings = OpenAIEmbeddings()
-vector_store = Chroma.from_documents(texts, embeddings)
-retriever = vector_store.as_retriever(search_kwargs={"k": 2})
-
-from langchain.chat_models import ChatOpenAI
-from langchain.chains import RetrievalQAWithSourcesChain
-
-llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Modify model_name if you have access to GPT-4
-
-chain = RetrievalQAWithSourcesChain.from_chain_type(
-    llm=llm,
-    chain_type="stuff",
-    retriever = retriever,
-    return_source_documents=True)
-
-from langchain.prompts.chat import (
-    ChatPromptTemplate,
-    SystemMessagePromptTemplate,
-    HumanMessagePromptTemplate,
-)
-
-system_template="""Use the following pieces of context to answer the users question shortly.
-Given the following summaries of a long document and a question, create a final answer with references ("SOURCES"), use "SOURCES" in capital letters regardless of the number of sources. -If you don't know the answer, just say that "I don't know", don't try to make up an answer. ----------------- -{summaries} - -You MUST answer in Korean and in Markdown format:""" - -messages = [ - SystemMessagePromptTemplate.from_template(system_template), - HumanMessagePromptTemplate.from_template("{question}") -] - -prompt = ChatPromptTemplate.from_messages(messages) - -from langchain.chat_models import ChatOpenAI -from langchain.chains import RetrievalQAWithSourcesChain - -chain_type_kwargs = {"prompt": prompt} - -llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Modify model_name if you have access to GPT-4 - -chain = RetrievalQAWithSourcesChain.from_chain_type( - llm=llm, - chain_type="stuff", - retriever = retriever, - return_source_documents=True, - chain_type_kwargs=chain_type_kwargs -) - -query = "행복한 인생이란?" -result = chain(query) - - -for doc in result['source_documents']: - print('내용 : ' + doc.page_content[0:100].replace('\n', ' ')) - print('파일 : ' + doc.metadata['source']) - print('페이지 : ' + str(doc.metadata['page'])) - - -def respond(message, chat_history): # 채팅봇의 응답을 처리하는 함수를 정의합니다. - - result = chain(message) - - bot_message = result['answer'] - - for i, doc in enumerate(result['source_documents']): - bot_message += '[' + str(i+1) + '] ' + doc.metadata['source'] + '(' + str(doc.metadata['page']) + ') ' - - chat_history.append((message, bot_message)) # 채팅 기록에 사용자의 메시지와 봇의 응답을 추가합니다. - - return "", chat_history # 수정된 채팅 기록을 반환합니다. - -with gr.Blocks(theme='gstaff/sketch') as demo: # gr.Blocks()를 사용하여 인터페이스를 생성합니다. - gr.Markdown("# 안녕하세요. 카네기와 대화해보세요. \n 답변을 위해 생각하는 시간이 걸릴 수 있습니다.") - chatbot = gr.Chatbot(label="채팅창") # '채팅창'이라는 레이블을 가진 채팅봇 컴포넌트를 생성합니다. - msg = gr.Textbox(label="입력") # '입력'이라는 레이블을 가진 텍스트박스를 생성합니다. - clear = gr.Button("초기화") # '초기화'라는 레이블을 가진 버튼을 생성합니다. - - msg.submit(respond, [msg, chatbot], [msg, chatbot]) # 텍스트박스에 메시지를 입력하고 제출하면 respond 함수가 호출되도록 합니다. - clear.click(lambda: None, None, chatbot, queue=False) # '초기화' 버튼을 클릭하면 채팅 기록을 초기화합니다. -demo.launch(debug=True) # 인터페이스를 실행합니다. 실행하면 사용자는 '입력' 텍스트박스에 메시지를 작성하고 제출할 수 있으며, '초기화' 버튼을 통해 채팅 기록을 초기화 할 수 있습니다. 
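The Talk2Carnegie app removed above is a standard retrieval-augmented QA pipeline, though it constructs `llm` and `chain` twice; only the second construction, which wires in the custom Korean-answer `prompt`, is actually used by the Gradio callback. Stripped of the UI, the core flow reduces to the sketch below: a minimal reconstruction assuming the same legacy `langchain` API used by the file, a local `docs.pdf` placeholder, and `OPENAI_API_KEY` already set in the environment.

```python
# Minimal sketch of the deleted app's retrieval-QA core. Legacy (pre-0.1)
# langchain imports are assumed, matching the original file; "docs.pdf" is
# a placeholder path.
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQAWithSourcesChain

# 1. Load and chunk the source document.
texts = CharacterTextSplitter(chunk_size=800, chunk_overlap=0).split_documents(
    PyPDFLoader("docs.pdf").load())

# 2. Embed the chunks and expose a top-2 retriever over the vector store.
retriever = Chroma.from_documents(texts, OpenAIEmbeddings()).as_retriever(
    search_kwargs={"k": 2})

# 3. "stuff" the retrieved chunks into a single GPT-4 prompt, keeping sources.
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=ChatOpenAI(model_name="gpt-4", temperature=0),
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True)

result = chain("행복한 인생이란?")  # same test query as the original file
print(result["answer"])
```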
diff --git a/spaces/Jai12345/App/app.py b/spaces/Jai12345/App/app.py deleted file mode 100644 index 8346996dd4e5871b367c37f30de7b81ab66648fd..0000000000000000000000000000000000000000 --- a/spaces/Jai12345/App/app.py +++ /dev/null @@ -1,234 +0,0 @@ -from sentence_transformers import SentenceTransformer, CrossEncoder, util -import re -import pandas as pd -from newspaper import Article -import docx2txt -from io import StringIO -from PyPDF2 import PdfFileReader -import validators -import nltk -import streamlit as st -import pickle - -nltk.download('punkt') - -from nltk import sent_tokenize - -def extract_text_from_url(url: str): - '''Extract text from url''' - - article = Article(url) - article.download() - article.parse() - - # get text - text = article.text - - # get article title - title = article.title - - return title, text - - -def extract_text_from_file(file): - '''Extract text from uploaded file''' - - # read text file - if file.type == "text/plain": - # To convert to a string based IO: - stringio = StringIO(file.getvalue().decode("utf-8")) - - # To read file as string: - file_text = stringio.read() - - return file_text, None - - # read pdf file - elif file.type == "application/pdf": - pdfReader = PdfFileReader(file) - count = pdfReader.numPages - all_text = "" - pdf_title = pdfReader.getDocumentInfo().title - - for i in range(count): - - try: - page = pdfReader.getPage(i) - all_text += page.extractText() - - except: - continue - - file_text = all_text - - return file_text, pdf_title - - # read docx file - elif ( - file.type - == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" - ): - file_text = docx2txt.process(file) - - return file_text, None - - -def preprocess_plain_text(text, window_size=3): - text = text.encode("ascii", "ignore").decode() # unicode - text = re.sub(r"https*\S+", " ", text) # url - text = re.sub(r"@\S+", " ", text) # mentions - text = re.sub(r"#\S+", " ", text) # hastags - text = re.sub(r"\s{2,}", " ", text) # over spaces - text = re.sub("[^.,!?%$A-Za-z0-9]+", " ", text) # special characters except .,!? 
- - # break into lines and remove leading and trailing space on each - lines = [line.strip() for line in text.splitlines()] - - # #break multi-headlines into a line each - chunks = [phrase.strip() for line in lines for phrase in line.split(" ")] - - # drop blank lines - text = '\n'.join(chunk for chunk in chunks if chunk) - - # We split this article into paragraphs and then every paragraph into sentences - paragraphs = [] - for paragraph in text.replace('\n', ' ').split("\n\n"): - if len(paragraph.strip()) > 0: - paragraphs.append(sent_tokenize(paragraph.strip())) - - window_size = 3 - passages = [] - for paragraph in paragraphs: - for start_idx in range(0, len(paragraph), window_size): - end_idx = min(start_idx + window_size, len(paragraph)) - passages.append(" ".join(paragraph[start_idx:end_idx])) - - return passages - - -def bi_encode(bi_enc,passages): - global bi_encoder - # We use the Bi-Encoder to encode all passages, so that we can use it with sematic search - bi_encoder = SentenceTransformer(bi_enc) - - - # Compute the embeddings - with st.spinner('Encoding passages into a vector space...'): - corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True) - - st.success(f"Embeddings computed.") - - return bi_encoder, corpus_embeddings - - -def cross_encode(): - global cross_encoder - # We use a cross-encoder, to re-rank the results list to improve the quality - cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2') - return cross_encoder - - - -def display_as_table(model, score='score'): - # Display the df with text and scores as a table - df = pd.DataFrame([(hit[score],passages[hit['corpus_id']]) for hit in model[0:2]],columns=['Score','Text']) - df['Score'] = round(df['Score'],2) - - return df - - -st.title("Search Your Query Here") -window_size = 3 -bi_encoder_type = "multi-qa-mpnet-base-dot-v1" - -# This will search articles for passages to answer the query -def search_func(query): - global bi_encoder, cross_encoder - - st.subheader(f"Search Query: {query}") - - if url_text: - - st.write(f"Document Header: {title}") - - elif pdf_title: - - st.write(f"Document Header: {pdf_title}") - - - # Encode the query using the bi-encoder and find relevant answers - question_embedding = bi_encoder.encode(query, convert_to_tensor=True) - question_embedding = question_embedding.cpu() - hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=2, score_function=util.dot_score) - hits = hits[0] # Get the hits for the first query - - # Now, score all retrieved passages with the cross_encoder - cross_inp = [[query, passages[hit['corpus_id']]] for hit in hits] - cross_scores = cross_encoder.predict(cross_inp) - - # Sort results by the cross-encoder scores - for idx in range(len(cross_scores)): - hits[idx]['cross-score'] = cross_scores[idx] - - # Output of top hits from cross encoder - st.markdown("\n-------------------------\n") - st.subheader(f"Top 2 Results") - hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True) - - rerank_df = display_as_table(hits, 'cross-score') - st.write(rerank_df.to_html(index=False), unsafe_allow_html=True) - - -def clear_text(): - st.session_state["text_url"] = "" - st.session_state["text_input"] = "" - - -def clear_search_text(): - st.session_state["text_input"] = "" - - -url_text = st.text_input("Please Enter a url here",value="https://en.wikipedia.org/wiki/Virat_Kohli",key='text_url', on_change=clear_search_text) - -st.markdown( - "
    OR
    ", - unsafe_allow_html=True, -) - -upload_doc = st.file_uploader("Upload a .txt, .pdf, .docx file", key="upload") - -search_query = st.text_input("Please Enter your search query here", - value="Who is Virat Kohli?", key="text_input") - -if validators.url(url_text): - # if input is URL - title, text = extract_text_from_url(url_text) - passages = preprocess_plain_text(text, window_size=3) - -elif upload_doc: - - text, pdf_title = extract_text_from_file(upload_doc) - passages = preprocess_plain_text(text, window_size=3) - -col1, col2 = st.columns(2) - -with col1: - search = st.button("Search", key='search_but', help='Click to Search!!') - -with col2: - clear = st.button("Clear Text Input", on_click=clear_text, key='clear',help='Click to clear the URL and query') - -if search: - if bi_encoder_type: - with st.spinner( - text=f"Loading..........................." - ): - bi_encoder, corpus_embeddings = bi_encode(bi_encoder_type,passages) - cross_encoder = cross_encode() - - with st.spinner( - text="Embedding completed, searching for relevant text for given query and hits..."): - search_func(search_query) - -st.markdown(""" - """) diff --git a/spaces/Juancho/forest_fire_detector/README.md b/spaces/Juancho/forest_fire_detector/README.md deleted file mode 100644 index fdd0c98f751b3bc7e7dc3be54935a4c4ddfd001b..0000000000000000000000000000000000000000 --- a/spaces/Juancho/forest_fire_detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Forest Fire Detector -emoji: 🏢 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.1.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JunchuanYu/SegRS/segment_anything/modeling/__init__.py b/spaces/JunchuanYu/SegRS/segment_anything/modeling/__init__.py deleted file mode 100644 index 38e906243d898d7fc071c0fe218338c5cace3ea1..0000000000000000000000000000000000000000 --- a/spaces/JunchuanYu/SegRS/segment_anything/modeling/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from .sam import Sam -from .image_encoder import ImageEncoderViT -from .mask_decoder import MaskDecoder -from .prompt_encoder import PromptEncoder -from .transformer import TwoWayTransformer diff --git a/spaces/Kayson/InstructDiffusion/edit_cli.py b/spaces/Kayson/InstructDiffusion/edit_cli.py deleted file mode 100644 index 8e8b9ff45eea6d7abefa2abda097a0d5e5ddffb4..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/edit_cli.py +++ /dev/null @@ -1,136 +0,0 @@ -# -------------------------------------------------------- -# InstructDiffusion -# Based on instruct-pix2pix (https://github.com/timothybrooks/instruct-pix2pix) -# Modified by Zigang Geng (zigang@mail.ustc.edu.cn) -# -------------------------------------------------------- - -from __future__ import annotations - -import os -import math -import random -import sys -from argparse import ArgumentParser - -import einops -import k_diffusion as K -import numpy as np -import torch -import torch.nn as nn -from einops import rearrange -from omegaconf import OmegaConf -from PIL import Image, ImageOps -from torch import autocast - -import requests - -sys.path.append("./stable_diffusion") - -from stable_diffusion.ldm.util import instantiate_from_config - - -class CFGDenoiser(nn.Module): - def __init__(self, model): - super().__init__() - self.inner_model = model - - def forward(self, z, sigma, cond, uncond, text_cfg_scale, image_cfg_scale): - cfg_z = einops.repeat(z, "b ... -> (repeat b) ...", repeat=3) - cfg_sigma = einops.repeat(sigma, "b ... -> (repeat b) ...", repeat=3) - cfg_cond = { - "c_crossattn": [torch.cat([cond["c_crossattn"][0], uncond["c_crossattn"][0], cond["c_crossattn"][0]])], - "c_concat": [torch.cat([cond["c_concat"][0], cond["c_concat"][0], uncond["c_concat"][0]])], - } - out_cond, out_img_cond, out_txt_cond \ - = self.inner_model(cfg_z, cfg_sigma, cond=cfg_cond).chunk(3) - return 0.5 * (out_img_cond + out_txt_cond) + \ - text_cfg_scale * (out_cond - out_img_cond) + \ - image_cfg_scale * (out_cond - out_txt_cond) - - -def load_model_from_config(config, ckpt, vae_ckpt=None, verbose=False): - model = instantiate_from_config(config.model) - - print(f"Loading model from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if 'state_dict' in pl_sd: - pl_sd = pl_sd['state_dict'] - m, u = model.load_state_dict(pl_sd, strict=False) - - print(m, u) - return model - - -def main(): - parser = ArgumentParser() - parser.add_argument("--resolution", default=512, type=int) - parser.add_argument("--steps", default=100, type=int) - parser.add_argument("--config", default="configs/instruct_diffusion.yaml", type=str) - parser.add_argument("--ckpt", default="checkpoints/v1-5-pruned-emaonly-adaption-task.ckpt", type=str) - parser.add_argument("--vae-ckpt", default=None, type=str) - parser.add_argument("--input", required=True, type=str) - parser.add_argument("--outdir", default="logs", type=str) - parser.add_argument("--edit", required=True, type=str) - parser.add_argument("--cfg-text", default=5.0, type=float) - parser.add_argument("--cfg-image", default=1.25, type=float) - parser.add_argument("--seed", type=int) - args = parser.parse_args() - - config = OmegaConf.load(args.config) - model = load_model_from_config(config, args.ckpt, args.vae_ckpt) - model.eval().cuda() - - model_wrap = K.external.CompVisDenoiser(model) - model_wrap_cfg = CFGDenoiser(model_wrap) - null_token = model.get_learned_conditioning([""]) - - seed = random.randint(0, 100000) if args.seed is None else args.seed - - if 
args.input.startswith("http"): - input_image = Image.open(requests.get(args.input, stream=True).raw).convert("RGB") - else: - input_image = Image.open(args.input).convert("RGB") - width, height = input_image.size - factor = args.resolution / max(width, height) - factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height) - width_resize = int((width * factor) // 64) * 64 - height_resize = int((height * factor) // 64) * 64 - input_image = ImageOps.fit(input_image, (width_resize, height_resize), method=Image.Resampling.LANCZOS) - - output_dir = args.outdir - os.makedirs(output_dir, exist_ok=True) - with torch.no_grad(), autocast("cuda"): - cond = {} - cond["c_crossattn"] = [model.get_learned_conditioning([args.edit])] - input_image = 2 * torch.tensor(np.array(input_image)).float() / 255 - 1 - input_image = rearrange(input_image, "h w c -> 1 c h w").to(next(model.parameters()).device) - cond["c_concat"] = [model.encode_first_stage(input_image).mode()] - - uncond = {} - uncond["c_crossattn"] = [null_token] - uncond["c_concat"] = [torch.zeros_like(cond["c_concat"][0])] - - sigmas = model_wrap.get_sigmas(args.steps) - - extra_args = { - "cond": cond, - "uncond": uncond, - "text_cfg_scale": args.cfg_text, - "image_cfg_scale": args.cfg_image, - } - - torch.manual_seed(seed) - z = torch.randn_like(cond["c_concat"][0]) * sigmas[0] - z = K.sampling.sample_euler_ancestral(model_wrap_cfg, z, sigmas, extra_args=extra_args) - x = model.decode_first_stage(z) - x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0) - x = 255.0 * rearrange(x, "1 c h w -> h w c") - print(x.shape) - edited_image = Image.fromarray(x.type(torch.uint8).cpu().numpy()) - - edited_image = ImageOps.fit(edited_image, (width, height), method=Image.Resampling.LANCZOS) - edited_image.save(output_dir+'/output_'+args.input.split('/')[-1].split('.')[0]+'_seed'+str(seed)+'.jpg') - - -if __name__ == "__main__": - main() diff --git a/spaces/LightChen2333/OpenSLU/tools/__init__.py b/spaces/LightChen2333/OpenSLU/tools/__init__.py deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/LightChen2333/OpenSLU/tools/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/LinkSoul/LLaSM/static/js/bulma-slider.min.js b/spaces/LinkSoul/LLaSM/static/js/bulma-slider.min.js deleted file mode 100644 index 7e62685763cf7668cfa8857fac0b27af2c277286..0000000000000000000000000000000000000000 --- a/spaces/LinkSoul/LLaSM/static/js/bulma-slider.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaSlider=e():t.bulmaSlider=e()}("undefined"!=typeof self?self:this,function(){return function(n){var r={};function i(t){if(r[t])return r[t].exports;var e=r[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,i),e.l=!0,e.exports}return i.m=n,i.c=r,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:n})},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=0)}([function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),n.d(e,"isString",function(){return l});var r=n(1),i=Object.assign||function(t){for(var 
e=1;e=l.length&&(s=!0)):s=!0),s&&(t.once&&(u[e]=null),t.callback(r))});-1!==u.indexOf(null);)u.splice(u.indexOf(null),1)}}]),e}();e.a=i}]).default}); \ No newline at end of file diff --git a/spaces/Linly-AI/Linly-ChatFlow/README.md b/spaces/Linly-AI/Linly-ChatFlow/README.md deleted file mode 100644 index 121c80f8b5b508fffb2682f862e7d6ec40028131..0000000000000000000000000000000000000000 --- a/spaces/Linly-AI/Linly-ChatFlow/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Linly ChatFlow 7B -emoji: 📉 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Liu-LAB/GPT-academic/multi_language.py b/spaces/Liu-LAB/GPT-academic/multi_language.py deleted file mode 100644 index c4ed36eb72f8c5f9a87accbea4a35c633e26854c..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/multi_language.py +++ /dev/null @@ -1,517 +0,0 @@ -""" - Translate this project to other languages (experimental, please open an issue if there is any bug) - - - Usage: - 1. modify config.py, set your LLM_MODEL and API_KEY(s) to provide access to OPENAI (or any other LLM model provider) - - 2. modify LANG (below ↓) - LANG = "English" - - 3. modify TransPrompt (below ↓) - TransPrompt = f"Replace each json value `#` with translated results in English, e.g., \"原始文本\":\"TranslatedText\". Keep Json format. Do not answer #." - - 4. Run `python multi_language.py`. - Note: You need to run it multiple times to increase translation coverage because GPT makes mistakes sometimes. - - 5. Find the translated program in `multi-language\English\*` - - P.S. - - - The translation mapping will be stored in `docs/translation_xxxx.json`, you can revised mistaken translation there. - - - If you would like to share your `docs/translation_xxxx.json`, (so that everyone can use the cached & revised translation mapping), please open a Pull Request - - - If there is any translation error in `docs/translation_xxxx.json`, please open a Pull Request - - - Welcome any Pull Request, regardless of language -""" - -import os -import json -import functools -import re -import pickle -import time - -CACHE_FOLDER = "gpt_log" -blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv'] - -# LANG = "TraditionalChinese" -# TransPrompt = f"Replace each json value `#` with translated results in Traditional Chinese, e.g., \"原始文本\":\"翻譯後文字\". Keep Json format. Do not answer #." - -# LANG = "Japanese" -# TransPrompt = f"Replace each json value `#` with translated results in Japanese, e.g., \"原始文本\":\"テキストの翻訳\". Keep Json format. Do not answer #." - -LANG = "English" -TransPrompt = f"Replace each json value `#` with translated results in English, e.g., \"原始文本\":\"TranslatedText\". Keep Json format. Do not answer #." - - -if not os.path.exists(CACHE_FOLDER): - os.makedirs(CACHE_FOLDER) - - -def lru_file_cache(maxsize=128, ttl=None, filename=None): - """ - Decorator that caches a function's return value after being called with given arguments. - It uses a Least Recently Used (LRU) cache strategy to limit the size of the cache. - maxsize: Maximum size of the cache. Defaults to 128. - ttl: Time-to-Live of the cache. If a value hasn't been accessed for `ttl` seconds, it will be evicted from the cache. - filename: Name of the file to store the cache in. 
If not supplied, the function name + ".cache" will be used. - """ - cache_path = os.path.join(CACHE_FOLDER, f"{filename}.cache") if filename is not None else None - - def decorator_function(func): - cache = {} - _cache_info = { - "hits": 0, - "misses": 0, - "maxsize": maxsize, - "currsize": 0, - "ttl": ttl, - "filename": cache_path, - } - - @functools.wraps(func) - def wrapper_function(*args, **kwargs): - key = str((args, frozenset(kwargs))) - if key in cache: - if _cache_info["ttl"] is None or (cache[key][1] + _cache_info["ttl"]) >= time.time(): - _cache_info["hits"] += 1 - print(f'Warning, reading cache, last read {(time.time()-cache[key][1])//60} minutes ago'); time.sleep(2) - cache[key][1] = time.time() - return cache[key][0] - else: - del cache[key] - - result = func(*args, **kwargs) - cache[key] = [result, time.time()] - _cache_info["misses"] += 1 - _cache_info["currsize"] += 1 - - if _cache_info["currsize"] > _cache_info["maxsize"]: - oldest_key = None - for k in cache: - if oldest_key is None: - oldest_key = k - elif cache[k][1] < cache[oldest_key][1]: - oldest_key = k - del cache[oldest_key] - _cache_info["currsize"] -= 1 - - if cache_path is not None: - with open(cache_path, "wb") as f: - pickle.dump(cache, f) - - return result - - def cache_info(): - return _cache_info - - wrapper_function.cache_info = cache_info - - if cache_path is not None and os.path.exists(cache_path): - with open(cache_path, "rb") as f: - cache = pickle.load(f) - _cache_info["currsize"] = len(cache) - - return wrapper_function - - return decorator_function - -def contains_chinese(string): - """ - Returns True if the given string contains Chinese characters, False otherwise. - """ - chinese_regex = re.compile(u'[\u4e00-\u9fff]+') - return chinese_regex.search(string) is not None - -def split_list(lst, n_each_req): - """ - Split a list into smaller lists, each with a maximum number of elements. 
- :param lst: the list to split - :param n_each_req: the maximum number of elements in each sub-list - :return: a list of sub-lists - """ - result = [] - for i in range(0, len(lst), n_each_req): - result.append(lst[i:i + n_each_req]) - return result - -def map_to_json(map, language): - dict_ = read_map_from_json(language) - dict_.update(map) - with open(f'docs/translate_{language.lower()}.json', 'w', encoding='utf8') as f: - json.dump(dict_, f, indent=4, ensure_ascii=False) - -def read_map_from_json(language): - if os.path.exists(f'docs/translate_{language.lower()}.json'): - with open(f'docs/translate_{language.lower()}.json', 'r', encoding='utf8') as f: - res = json.load(f) - res = {k:v for k, v in res.items() if v is not None and contains_chinese(k)} - return res - return {} - -def advanced_split(splitted_string, spliter, include_spliter=False): - splitted_string_tmp = [] - for string_ in splitted_string: - if spliter in string_: - splitted = string_.split(spliter) - for i, s in enumerate(splitted): - if include_spliter: - if i != len(splitted)-1: - splitted[i] += spliter - splitted[i] = splitted[i].strip() - for i in reversed(range(len(splitted))): - if not contains_chinese(splitted[i]): - splitted.pop(i) - splitted_string_tmp.extend(splitted) - else: - splitted_string_tmp.append(string_) - splitted_string = splitted_string_tmp - return splitted_string_tmp - -cached_translation = {} -cached_translation = read_map_from_json(language=LANG) - -def trans(word_to_translate, language, special=False): - if len(word_to_translate) == 0: return {} - from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from toolbox import get_conf, ChatBotWithCookies - proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') - llm_kwargs = { - 'api_key': API_KEY, - 'llm_model': LLM_MODEL, - 'top_p':1.0, - 'max_length': None, - 'temperature':0.4, - } - import random - N_EACH_REQ = random.randint(16, 32) - word_to_translate_split = split_list(word_to_translate, N_EACH_REQ) - inputs_array = [str(s) for s in word_to_translate_split] - inputs_show_user_array = inputs_array - history_array = [[] for _ in inputs_array] - if special: # to English using CamelCase Naming Convention - sys_prompt_array = [f"Translate following names to English with CamelCase naming convention. Keep original format" for _ in inputs_array] - else: - sys_prompt_array = [f"Translate following sentences to {LANG}. E.g., You should translate sentences to the following format ['translation of sentence 1', 'translation of sentence 2']. Do NOT answer with Chinese!" 
for _ in inputs_array] - chatbot = ChatBotWithCookies(llm_kwargs) - gpt_say_generator = request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array, - inputs_show_user_array, - llm_kwargs, - chatbot, - history_array, - sys_prompt_array, - ) - while True: - try: - gpt_say = next(gpt_say_generator) - print(gpt_say[1][0][1]) - except StopIteration as e: - result = e.value - break - translated_result = {} - for i, r in enumerate(result): - if i%2 == 1: - try: - res_before_trans = eval(result[i-1]) - res_after_trans = eval(result[i]) - if len(res_before_trans) != len(res_after_trans): - raise RuntimeError - for a,b in zip(res_before_trans, res_after_trans): - translated_result[a] = b - except: - # try: - # res_before_trans = word_to_translate_split[(i-1)//2] - # res_after_trans = [s for s in result[i].split("', '")] - # for a,b in zip(res_before_trans, res_after_trans): - # translated_result[a] = b - # except: - print('GPT answers with unexpected format, some words may not be translated, but you can try again later to increase translation coverage.') - res_before_trans = eval(result[i-1]) - for a in res_before_trans: - translated_result[a] = None - return translated_result - - -def trans_json(word_to_translate, language, special=False): - if len(word_to_translate) == 0: return {} - from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from toolbox import get_conf, ChatBotWithCookies - proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') - llm_kwargs = { - 'api_key': API_KEY, - 'llm_model': LLM_MODEL, - 'top_p':1.0, - 'max_length': None, - 'temperature':0.1, - } - import random - N_EACH_REQ = random.randint(16, 32) - random.shuffle(word_to_translate) - word_to_translate_split = split_list(word_to_translate, N_EACH_REQ) - inputs_array = [{k:"#" for k in s} for s in word_to_translate_split] - inputs_array = [ json.dumps(i, ensure_ascii=False) for i in inputs_array] - - inputs_show_user_array = inputs_array - history_array = [[] for _ in inputs_array] - sys_prompt_array = [TransPrompt for _ in inputs_array] - chatbot = ChatBotWithCookies(llm_kwargs) - gpt_say_generator = request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array, - inputs_show_user_array, - llm_kwargs, - chatbot, - history_array, - sys_prompt_array, - ) - while True: - try: - gpt_say = next(gpt_say_generator) - print(gpt_say[1][0][1]) - except StopIteration as e: - result = e.value - break - translated_result = {} - for i, r in enumerate(result): - if i%2 == 1: - try: - translated_result.update(json.loads(result[i])) - except: - print(result[i]) - print(result) - return translated_result - - -def step_1_core_key_translate(): - LANG_STD = 'std' - def extract_chinese_characters(file_path): - syntax = [] - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - import ast - root = ast.parse(content) - for node in ast.walk(root): - if isinstance(node, ast.Name): - if contains_chinese(node.id): syntax.append(node.id) - if isinstance(node, ast.Import): - for n in node.names: - if contains_chinese(n.name): syntax.append(n.name) - elif isinstance(node, ast.ImportFrom): - for n in node.names: - if contains_chinese(n.name): syntax.append(n.name) - # if node.module is None: print(node.module) - for k in node.module.split('.'): - if 
contains_chinese(k): syntax.append(k) - return syntax - - def extract_chinese_characters_from_directory(directory_path): - chinese_characters = [] - for root, dirs, files in os.walk(directory_path): - if any([b in root for b in blacklist]): - continue - print(files) - for file in files: - if file.endswith('.py'): - file_path = os.path.join(root, file) - chinese_characters.extend(extract_chinese_characters(file_path)) - return chinese_characters - - directory_path = './' - chinese_core_names = extract_chinese_characters_from_directory(directory_path) - chinese_core_keys = [name for name in chinese_core_names] - chinese_core_keys_norepeat = [] - for d in chinese_core_keys: - if d not in chinese_core_keys_norepeat: chinese_core_keys_norepeat.append(d) - need_translate = [] - cached_translation = read_map_from_json(language=LANG_STD) - cached_translation_keys = list(cached_translation.keys()) - for d in chinese_core_keys_norepeat: - if d not in cached_translation_keys: - need_translate.append(d) - - need_translate_mapping = trans(need_translate, language=LANG_STD, special=True) - map_to_json(need_translate_mapping, language=LANG_STD) - cached_translation = read_map_from_json(language=LANG_STD) - cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0]))) - - chinese_core_keys_norepeat_mapping = {} - for k in chinese_core_keys_norepeat: - chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]}) - chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0]))) - - # =============================================== - # copy - # =============================================== - def copy_source_code(): - - from toolbox import get_conf - import shutil - import os - try: shutil.rmtree(f'./multi-language/{LANG}/') - except: pass - os.makedirs(f'./multi-language', exist_ok=True) - backup_dir = f'./multi-language/{LANG}/' - shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist) - copy_source_code() - - # =============================================== - # primary key replace - # =============================================== - directory_path = f'./multi-language/{LANG}/' - for root, dirs, files in os.walk(directory_path): - for file in files: - if file.endswith('.py'): - file_path = os.path.join(root, file) - syntax = [] - # read again - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - for k, v in chinese_core_keys_norepeat_mapping.items(): - content = content.replace(k, v) - - with open(file_path, 'w', encoding='utf-8') as f: - f.write(content) - - -def step_2_core_key_translate(): - - # ================================================================================================= - # step2 - # ================================================================================================= - - def load_string(strings, string_input): - string_ = string_input.strip().strip(',').strip().strip('.').strip() - if string_.startswith('[Local Message]'): - string_ = string_.replace('[Local Message]', '') - string_ = string_.strip().strip(',').strip().strip('.').strip() - splitted_string = [string_] - # -------------------------------------- - splitted_string = advanced_split(splitted_string, spliter=",", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="。", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=")", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="(", 
include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="(", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=")", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="<", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=">", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="[", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="]", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="【", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="】", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="?", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=":", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=":", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=",", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="#", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="\n", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=";", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="`", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter=" ", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="- ", include_spliter=False) - splitted_string = advanced_split(splitted_string, spliter="---", include_spliter=False) - - # -------------------------------------- - for j, s in enumerate(splitted_string): # .com - if '.com' in s: continue - if '\'' in s: continue - if '\"' in s: continue - strings.append([s,0]) - - - def get_strings(node): - strings = [] - # recursively traverse the AST - for child in ast.iter_child_nodes(node): - node = child - if isinstance(child, ast.Str): - if contains_chinese(child.s): - load_string(strings=strings, string_input=child.s) - elif isinstance(child, ast.AST): - strings.extend(get_strings(child)) - return strings - - string_literals = [] - directory_path = f'./multi-language/{LANG}/' - for root, dirs, files in os.walk(directory_path): - for file in files: - if file.endswith('.py'): - file_path = os.path.join(root, file) - syntax = [] - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - # comments - comments_arr = [] - for code_sp in content.splitlines(): - comments = re.findall(r'#.*$', code_sp) - for comment in comments: - load_string(strings=comments_arr, string_input=comment) - string_literals.extend(comments_arr) - - # strings - import ast - tree = ast.parse(content) - res = get_strings(tree, ) - string_literals.extend(res) - - [print(s) for s in string_literals] - chinese_literal_names = [] - chinese_literal_names_norepeat = [] - for string, offset in string_literals: - chinese_literal_names.append(string) - chinese_literal_names_norepeat = [] - for d in chinese_literal_names: - if d not in chinese_literal_names_norepeat: chinese_literal_names_norepeat.append(d) - need_translate = [] - cached_translation = read_map_from_json(language=LANG) - cached_translation_keys = list(cached_translation.keys()) - for d in chinese_literal_names_norepeat: - if d not in cached_translation_keys: - need_translate.append(d) - - - up = trans_json(need_translate, language=LANG, special=False) - 
map_to_json(up, language=LANG) - cached_translation = read_map_from_json(language=LANG) - LANG_STD = 'std' - cached_translation.update(read_map_from_json(language=LANG_STD)) - cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0]))) - - # =============================================== - # literal key replace - # =============================================== - directory_path = f'./multi-language/{LANG}/' - for root, dirs, files in os.walk(directory_path): - for file in files: - if file.endswith('.py'): - file_path = os.path.join(root, file) - syntax = [] - # read again - with open(file_path, 'r', encoding='utf-8') as f: - content = f.read() - - for k, v in cached_translation.items(): - if v is None: continue - if '"' in v: - v = v.replace('"', "`") - if '\'' in v: - v = v.replace('\'', "`") - content = content.replace(k, v) - - with open(file_path, 'w', encoding='utf-8') as f: - f.write(content) - - if file.strip('.py') in cached_translation: - file_new = cached_translation[file.strip('.py')] + '.py' - file_path_new = os.path.join(root, file_new) - with open(file_path_new, 'w', encoding='utf-8') as f: - f.write(content) - os.remove(file_path) -step_1_core_key_translate() -step_2_core_key_translate() -print('Finished, checkout generated results at ./multi-language/') \ No newline at end of file diff --git a/spaces/Luelll/ChuanhuChatGPT/assets/Kelpy-Codos.js b/spaces/Luelll/ChuanhuChatGPT/assets/Kelpy-Codos.js deleted file mode 100644 index cfbaeedb4f371dfb5fe157db545b364046fca3e1..0000000000000000000000000000000000000000 --- a/spaces/Luelll/ChuanhuChatGPT/assets/Kelpy-Codos.js +++ /dev/null @@ -1,76 +0,0 @@ -// ==UserScript== -// @name Kelpy Codos -// @namespace https://github.com/Keldos-Li/Kelpy-Codos -// @version 1.0.5 -// @author Keldos; https://keldos.me/ -// @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially. 
-// Based on Chuanhu ChatGPT version: ac04408 (2023-3-22) -// @license GPL-3.0 -// @grant none -// ==/UserScript== - -(function () { - 'use strict'; - - function addCopyButton(pre) { - var code = pre.querySelector('code'); - if (!code) { - return; // 如果没有找到 元素,则不添加按钮 - } - var firstChild = code.firstChild; - if (!firstChild) { - return; // 如果 元素没有子节点,则不添加按钮 - } - var button = document.createElement('button'); - button.textContent = '\uD83D\uDCCE'; // 使用 📎 符号作为“复制”按钮的文本 - button.style.position = 'relative'; - button.style.float = 'right'; - button.style.fontSize = '1em'; // 可选:调整按钮大小 - button.style.background = 'none'; // 可选:去掉背景颜色 - button.style.border = 'none'; // 可选:去掉边框 - button.style.cursor = 'pointer'; // 可选:显示指针样式 - button.addEventListener('click', function () { - var range = document.createRange(); - range.selectNodeContents(code); - range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前 - var selection = window.getSelection(); - selection.removeAllRanges(); - selection.addRange(range); - - try { - var success = document.execCommand('copy'); - if (success) { - button.textContent = '\u2714'; - setTimeout(function () { - button.textContent = '\uD83D\uDCCE'; // 恢复按钮为“复制” - }, 2000); - } else { - button.textContent = '\u2716'; - } - } catch (e) { - console.error(e); - button.textContent = '\u2716'; - } - - selection.removeAllRanges(); - }); - code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前 - } - - function handleNewElements(mutationsList, observer) { - for (var mutation of mutationsList) { - if (mutation.type === 'childList') { - for (var node of mutation.addedNodes) { - if (node.nodeName === 'PRE') { - addCopyButton(node); - } - } - } - } - } - - var observer = new MutationObserver(handleNewElements); - observer.observe(document.documentElement, { childList: true, subtree: true }); - - document.querySelectorAll('pre').forEach(addCopyButton); -})(); diff --git a/spaces/LuxOAI/BGCGW/README.md b/spaces/LuxOAI/BGCGW/README.md deleted file mode 100644 index f7a4579af719fbcc64e5cba13c37587b66f93126..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/BGCGW/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: BGCGW -emoji: 👁 -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/LuxOAI/ChatGpt-Web/app/locales/jp.ts b/spaces/LuxOAI/ChatGpt-Web/app/locales/jp.ts deleted file mode 100644 index 472fa7020178d6cf02dd2701cb454c4e6ae8decf..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/locales/jp.ts +++ /dev/null @@ -1,243 +0,0 @@ -import { SubmitKey } from "../store/config"; -import type { LocaleType } from "./index"; - -const jp: LocaleType = { - WIP: "この機能は開発中です……", - Error: { - Unauthorized: - "現在は未承認状態です。左下の設定ボタンをクリックし、アクセスパスワードを入力してください。", - }, - ChatItem: { - ChatItemCount: (count: number) => `${count} 通のチャット`, - }, - Chat: { - SubTitle: (count: number) => `ChatGPTとの ${count} 通のチャット`, - Actions: { - ChatList: "メッセージリストを表示", - CompressedHistory: "圧縮された履歴プロンプトを表示", - Export: "チャット履歴をエクスポート", - Copy: "コピー", - Stop: "停止", - Retry: "リトライ", - Delete: "Delete", - }, - Rename: "チャットの名前を変更", - Typing: "入力中…", - Input: (submitKey: string) => { - var inputHints = `${submitKey} で送信`; - if (submitKey === String(SubmitKey.Enter)) { - inputHints += ",Shift + Enter で改行"; - } - return inputHints + ",/ で自動補完をトリガー"; - }, - Send: "送信", - Config: { - Reset: "重置默认", - SaveAs: 
"另存为面具", - }, - }, - Export: { - Title: "チャット履歴をMarkdown形式でエクスポート", - Copy: "すべてコピー", - Download: "ファイルをダウンロード", - MessageFromYou: "あなたからのメッセージ", - MessageFromChatGPT: "ChatGPTからのメッセージ", - }, - Memory: { - Title: "履歴メモリ", - EmptyContent: "まだ記憶されていません", - Send: "メモリを送信", - Copy: "メモリをコピー", - Reset: "チャットをリセット", - ResetConfirm: - "リセット後、現在のチャット履歴と過去のメモリがクリアされます。リセットしてもよろしいですか?", - }, - Home: { - NewChat: "新しいチャット", - DeleteChat: "選択したチャットを削除してもよろしいですか?", - DeleteToast: "チャットが削除されました", - Revert: "元に戻す", - }, - Settings: { - Title: "設定", - SubTitle: "設定オプション", - Actions: { - ClearAll: "すべてのデータをクリア", - ResetAll: "すべてのオプションをリセット", - Close: "閉じる", - ConfirmResetAll: "すべての設定をリセットしてもよろしいですか?", - ConfirmClearAll: "すべてのチャットをリセットしてもよろしいですか?", - }, - Lang: { - Name: "Language", - All: "所有语言", - Options: { - cn: "简体中文", - en: "English", - tw: "繁體中文", - es: "Español", - it: "Italiano", - tr: "Türkçe", - jp: "日本語", - de: "Deutsch", - }, - }, - Avatar: "アバター", - FontSize: { - Title: "フォントサイズ", - SubTitle: "チャット内容のフォントサイズ", - }, - - Update: { - Version: (x: string) => `現在のバージョン:${x}`, - IsLatest: "最新バージョンです", - CheckUpdate: "アップデートを確認", - IsChecking: "アップデートを確認しています...", - FoundUpdate: (x: string) => `新しいバージョンが見つかりました:${x}`, - GoToUpdate: "更新する", - }, - SendKey: "送信キー", - Theme: "テーマ", - TightBorder: "ボーダーレスモード", - SendPreviewBubble: { - Title: "プレビューバブルの送信", - SubTitle: "在预览气泡中预览 Markdown 内容", - }, - Mask: { - Title: "面具启动页", - SubTitle: "新建聊天时,展示面具启动页", - }, - Prompt: { - Disable: { - Title: "プロンプトの自動補完を無効にする", - SubTitle: - "入力フィールドの先頭に / を入力すると、自動補完がトリガーされます。", - }, - List: "カスタムプロンプトリスト", - ListCount: (builtin: number, custom: number) => - `組み込み ${builtin} 件、ユーザー定義 ${custom} 件`, - Edit: "編集", - Modal: { - Title: "プロンプトリスト", - Add: "新規追加", - Search: "プロンプトワード検索", - }, - EditModal: { - Title: "编辑提示词", - }, - }, - HistoryCount: { - Title: "履歴メッセージ数を添付", - SubTitle: "リクエストごとに添付する履歴メッセージ数", - }, - CompressThreshold: { - Title: "履歴メッセージの長さ圧縮しきい値", - SubTitle: - "圧縮されていない履歴メッセージがこの値を超えた場合、圧縮が行われます。", - }, - Token: { - Title: "APIキー", - SubTitle: "自分のキーを使用してパスワードアクセス制限を迂回する", - Placeholder: "OpenAI APIキー", - }, - Usage: { - Title: "残高照会", - SubTitle(used: any, total: any) { - return `今月は $${used} を使用しました。総額は $${total} です。`; - }, - IsChecking: "確認中...", - Check: "再確認", - NoAccess: "APIキーまたはアクセスパスワードを入力して残高を表示", - }, - AccessCode: { - Title: "アクセスパスワード", - SubTitle: "暗号化アクセスが有効になっています", - Placeholder: "アクセスパスワードを入力してください", - }, - Bot: "AIベンダー (bot)", - Model: "モデル (model)", - Temperature: { - Title: "ランダム性 (temperature)", - SubTitle: - "値が大きいほど、回答がランダムになります。1以上の値には文字化けが含まれる可能性があります。", - }, - MaxTokens: { - Title: "シングルレスポンス制限 (max_tokens)", - SubTitle: "1回のインタラクションで使用される最大トークン数", - }, - PresencePenlty: { - Title: "トピックの新鮮度 (presence_penalty)", - SubTitle: "値が大きいほど、新しいトピックへの展開が可能になります。", - }, - }, - Store: { - DefaultTopic: "新しいチャット", - BotHello: "何かお手伝いできることはありますか", - Error: "エラーが発生しました。しばらくしてからやり直してください。", - Prompt: { - History: (content: string) => - "これは、AI とユーザの過去のチャットを要約した前提となるストーリーです:" + - content, - Topic: - "4~5文字でこの文章の簡潔な主題を返してください。説明、句読点、感嘆詞、余分なテキストは無しで。もし主題がない場合は、「おしゃべり」を返してください", - Summarize: - "あなたとユーザの会話を簡潔にまとめて、後続のコンテキストプロンプトとして使ってください。200字以内に抑えてください。", - }, - }, - Copy: { - Success: "クリップボードに書き込みました", - Failed: "コピーに失敗しました。クリップボード許可を与えてください。", - }, - Context: { - Toast: (x: any) => `前置コンテキストが ${x} 件設定されました`, - Edit: "前置コンテキストと履歴メモリ", - Add: "新規追加", - }, - Plugin: { Name: "插件" }, - Mask: { - Name: "面具", - Page: { - Title: "预设角色面具", - SubTitle: (count: number) => `${count} 个预设角色定义`, - Search: 
"搜索角色面具", - Create: "新建", - }, - Item: { - Info: (count: number) => `包含 ${count} 条预设对话`, - Chat: "对话", - View: "查看", - Edit: "编辑", - Delete: "删除", - DeleteConfirm: "确认删除?", - }, - EditModal: { - Title: (readonly: boolean) => - `编辑预设面具 ${readonly ? "(只读)" : ""}`, - Download: "下载预设", - Clone: "克隆预设", - }, - Config: { - Avatar: "角色头像", - Name: "角色名称", - }, - }, - NewChat: { - Return: "返回", - Skip: "跳过", - Title: "挑选一个面具", - SubTitle: "现在开始,与面具背后的灵魂思维碰撞", - More: "搜索更多", - NotShow: "不再展示", - ConfirmNoShow: "确认禁用?禁用后可以随时在设置中重新启用。", - }, - - UI: { - Confirm: "确认", - Cancel: "取消", - Close: "关闭", - Create: "新建", - Edit: "编辑", - }, -}; - -export default jp; diff --git a/spaces/MBZ/LoRA-DreamBooth-Training-UI/utils.py b/spaces/MBZ/LoRA-DreamBooth-Training-UI/utils.py deleted file mode 100644 index 8fe82394db3a576d0b8bb94788cdc313a1b44392..0000000000000000000000000000000000000000 --- a/spaces/MBZ/LoRA-DreamBooth-Training-UI/utils.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -import pathlib - - -def find_exp_dirs(ignore_repo: bool = False) -> list[str]: - repo_dir = pathlib.Path(__file__).parent - exp_root_dir = repo_dir / 'experiments' - if not exp_root_dir.exists(): - return [] - exp_dirs = sorted(exp_root_dir.glob('*')) - exp_dirs = [ - exp_dir for exp_dir in exp_dirs - if (exp_dir / 'pytorch_lora_weights.bin').exists() - ] - if ignore_repo: - exp_dirs = [ - exp_dir for exp_dir in exp_dirs if not (exp_dir / '.git').exists() - ] - return [path.relative_to(repo_dir).as_posix() for path in exp_dirs] - - -def save_model_card( - save_dir: pathlib.Path, - base_model: str, - instance_prompt: str, - test_prompt: str = '', - test_image_dir: str = '', -) -> None: - image_str = '' - if test_prompt and test_image_dir: - image_paths = sorted((save_dir / test_image_dir).glob('*')) - if image_paths: - image_str = f'Test prompt: {test_prompt}\n' - for image_path in image_paths: - rel_path = image_path.relative_to(save_dir) - image_str += f'![{image_path.stem}]({rel_path})\n' - - model_card = f'''--- -license: creativeml-openrail-m -base_model: {base_model} -instance_prompt: {instance_prompt} -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -- lora -inference: true ---- -# LoRA DreamBooth - {save_dir.name} - -These are LoRA adaption weights for [{base_model}](https://huggingface.co/{base_model}). The weights were trained on the instance prompt "{instance_prompt}" using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. 
- -{image_str} -''' - - with open(save_dir / 'README.md', 'w') as f: - f.write(model_card) diff --git a/spaces/MMMMQZ/MQZGPT/modules/shared.py b/spaces/MMMMQZ/MQZGPT/modules/shared.py deleted file mode 100644 index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000 --- a/spaces/MMMMQZ/MQZGPT/modules/shared.py +++ /dev/null @@ -1,55 +0,0 @@ -from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST -import os -import queue - -class State: - interrupted = False - multi_api_key = False - completion_url = COMPLETION_URL - balance_api_url = BALANCE_API_URL - usage_api_url = USAGE_API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_host(self, api_host): - self.completion_url = f"https://{api_host}/v1/chat/completions" - self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants" - self.usage_api_url = f"https://{api_host}/dashboard/billing/usage" - os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1" - - def reset_api_host(self): - self.completion_url = COMPLETION_URL - self.balance_api_url = BALANCE_API_URL - self.usage_api_url = USAGE_API_URL - os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1" - return API_HOST - - def reset_all(self): - self.interrupted = False - self.completion_url = COMPLETION_URL - - def set_api_key_queue(self, api_key_list): - self.multi_api_key = True - self.api_key_queue = queue.Queue() - for api_key in api_key_list: - self.api_key_queue.put(api_key) - - def switching_api_key(self, func): - if not hasattr(self, "api_key_queue"): - return func - - def wrapped(*args, **kwargs): - api_key = self.api_key_queue.get() - args[0].api_key = api_key - ret = func(*args, **kwargs) - self.api_key_queue.put(api_key) - return ret - - return wrapped - - -state = State() diff --git a/spaces/MZhaovo/Llama_Difu/llama_func.py b/spaces/MZhaovo/Llama_Difu/llama_func.py deleted file mode 100644 index 28282b21ffb615092e4874e5eac1917b02b4e454..0000000000000000000000000000000000000000 --- a/spaces/MZhaovo/Llama_Difu/llama_func.py +++ /dev/null @@ -1,180 +0,0 @@ -import os -from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, download_loader -from llama_index import Document, LLMPredictor, PromptHelper, QuestionAnswerPrompt, JSONReader -from langchain.llms import OpenAIChat, OpenAI -from zipfile import ZipFile -from googlesearch import search as google_search -from baidusearch.baidusearch import search as baidu_search -import traceback -import openai - -from utils import * - -def save_index(index, index_name, exist_ok=False): - file_path = f"./index/{index_name}.json" - - if not os.path.exists(file_path) or exist_ok: - index.save_to_disk(file_path) - print(f'Saved file "{file_path}".') - else: - i = 1 - while True: - new_file_path = f'{os.path.splitext(file_path)[0]}_{i}{os.path.splitext(file_path)[1]}' - if not os.path.exists(new_file_path): - index.save_to_disk(new_file_path) - print(f'Saved file "{new_file_path}".') - break - i += 1 - -def construct_index(api_key, file_list, index_name, max_input_size=4096, num_outputs=512, max_chunk_overlap=20, raw=False): - documents = [] - if not raw: - txt_set = [] - for file in file_list: - if os.path.splitext(file.name)[1] == '.pdf': - CJKPDFReader = download_loader("CJKPDFReader") - loader = CJKPDFReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == '.docx': - DocxReader = download_loader("DocxReader") - loader = DocxReader() - 
documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == '.epub': - EpubReader = download_loader("EpubReader") - loader = EpubReader() - documents += loader.load_data(file=file.name) - else: - with open(file.name, 'r', encoding="utf-8") as f: - txt_set.append(f.read()) - documents += [Document(k) for k in txt_set] - else: - documents += [Document(k.text.encode("UTF-8", errors="strict").decode()) for k in file_list] - - # Customizing LLM - llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=api_key)) - prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap) - - index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper) - - if not raw: - save_index(index, index_name) - newlist = refresh_json_list(plain=True) - return newlist, newlist - else: - save_index(index, index_name, exist_ok=True) - return index - -def chat_ai(api_key, index_select, question, prompt_tmpl, sim_k, chat_tone ,context, chatbot, search_mode=[], suggested_user_question = ""): - os.environ["OPENAI_API_KEY"] = api_key - print(f"Question: {question}") - if question=="": - question = suggested_user_question - - if chat_tone == 0: - temprature = 2 - elif chat_tone == 1: - temprature = 1 - else: - temprature = 0.5 - - if not search_mode: - response = ask_ai(api_key, index_select, question, prompt_tmpl, sim_k, temprature, context) - else: - print(f"You asked: {question}") - BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader") - loader = BeautifulSoupWebReader() - chat = OpenAI(model_name="gpt-3.5-turbo", openai_api_key=api_key) - search_terms = chat.generate([f"Please extract search terms from the user’s question. The search terms is a concise sentence, which will be searched on Google to obtain relevant information to answer the user’s question, too generalized search terms doesn’t help. Please provide no more than two search terms. Please provide the most relevant search terms only, the search terms should directly correspond to the user’s question. Please separate different search items with commas, with no quote marks. The user’s question is: {question}"]).generations[0][0].text.strip() - search_terms = search_terms.replace('"', '') - search_terms = search_terms.replace(".", "") - links = [] - for keywords in search_terms.split(","): - keywords = keywords.strip() - for search_engine in search_mode: - if "Google" in search_engine: - print(f"Googling: {keywords}") - search_iter = google_search(keywords, num_results=5) - links += [next(search_iter) for _ in range(10)] - if "Baidu" in search_engine: - print(f"Baiduing: {keywords}") - search_results = baidu_search(keywords, num_results=5) - links += [i["url"] for i in search_results if i["url"].startswith("http") and (not "@" in i["url"])] - if "Manual" in search_engine: - print(f"Searching manually: {keywords}") - print("Please input links manually. (Enter 'q' to quit.)") - while True: - link = input("请手动输入一个链接:\n") - if link == "q": - break - else: - links.append(link) - links = list(set(links)) - if len(links) == 0: - msg = "No links found." 
-def chat_ai(api_key, index_select, question, prompt_tmpl, sim_k, chat_tone, context, chatbot, search_mode=[], suggested_user_question=""):
-    os.environ["OPENAI_API_KEY"] = api_key
-    print(f"Question: {question}")
-    if question == "":
-        question = suggested_user_question
-
-    # Map the UI tone setting onto a sampling temperature.
-    if chat_tone == 0:
-        temperature = 2
-    elif chat_tone == 1:
-        temperature = 1
-    else:
-        temperature = 0.5
-
-    if not search_mode:
-        response = ask_ai(api_key, index_select, question, prompt_tmpl, sim_k, temperature, context)
-    else:
-        print(f"You asked: {question}")
-        BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
-        loader = BeautifulSoupWebReader()
-        chat = OpenAI(model_name="gpt-3.5-turbo", openai_api_key=api_key)
-        search_terms = chat.generate([f"Please extract search terms from the user’s question. The search terms is a concise sentence, which will be searched on Google to obtain relevant information to answer the user’s question, too generalized search terms doesn’t help. Please provide no more than two search terms. Please provide the most relevant search terms only, the search terms should directly correspond to the user’s question. Please separate different search items with commas, with no quote marks. The user’s question is: {question}"]).generations[0][0].text.strip()
-        search_terms = search_terms.replace('"', '')
-        search_terms = search_terms.replace(".", "")
-        links = []
-        for keywords in search_terms.split(","):
-            keywords = keywords.strip()
-            for search_engine in search_mode:
-                if "Google" in search_engine:
-                    print(f"Googling: {keywords}")
-                    search_iter = google_search(keywords, num_results=5)
-                    links += list(search_iter)  # take everything the generator yields (at most 5)
-                if "Baidu" in search_engine:
-                    print(f"Baiduing: {keywords}")
-                    search_results = baidu_search(keywords, num_results=5)
-                    links += [i["url"] for i in search_results if i["url"].startswith("http") and (not "@" in i["url"])]
-                if "Manual" in search_engine:
-                    print(f"Searching manually: {keywords}")
-                    print("Please input links manually. (Enter 'q' to quit.)")
-                    while True:
-                        link = input("Please enter a link manually:\n")
-                        if link == "q":
-                            break
-                        else:
-                            links.append(link)
-        links = list(set(links))
-        if len(links) == 0:
-            msg = "No links found."
-            print(msg)
-            chatbot.append((question, msg))
-            return context, chatbot, gr.Dropdown.update(choices=[])
-        print("Extracting data from links...")
-        print('\n'.join(links))
-        documents = loader.load_data(urls=links)
-        # convert to utf-8 encoding
-
-        index = construct_index(api_key, documents, " ".join(search_terms.split(",")), raw=True)
-
-        print("Generating response...")
-        response = ask_ai(api_key, index_select, question, prompt_tmpl, sim_k, temperature, context, raw=index)
-    response = response.split("\n")
-    suggested_next_turns = []
-    for i, line in enumerate(response):
-        if "next user turn" in line:
-            suggested_next_turns = response[i+1:]
-            response = response[:i]
-            break
-    suggested_next_turns = [i.split()[1] for i in suggested_next_turns]
-    response = "\n".join(response)
-    response = parse_text(response)
-    context.append({"role": "user", "content": question})
-    context.append({"role": "assistant", "content": response})
-    chatbot.append((question, response))
-    os.environ["OPENAI_API_KEY"] = ""
-    return context, chatbot, gr.Dropdown.update(choices=suggested_next_turns)
-
-
-def ask_ai(api_key, index_select, question, prompt_tmpl, sim_k=1, temperature=0, prefix_messages=[], raw=None):
-    os.environ["OPENAI_API_KEY"] = api_key
-    if raw is not None:
-        index = raw
-    else:
-        index = load_index(index_select)
-
-    prompt = QuestionAnswerPrompt(prompt_tmpl)
-
-    llm_predictor = LLMPredictor(llm=OpenAI(temperature=temperature, model_name="gpt-3.5-turbo", openai_api_key=api_key, prefix_messages=prefix_messages))
-
-    try:
-        response = index.query(question, llm_predictor=llm_predictor, similarity_top_k=sim_k, text_qa_template=prompt)
-    except Exception:
-        traceback.print_exc()
-        return ""
-
-    print(f"Response: {response.response}")
-    os.environ["OPENAI_API_KEY"] = ""
-    return response.response
-
-
-def load_index(index_name):
-    index_path = f"./index/{index_name}.json"
-    if not os.path.exists(index_path):
-        return None
-
-    index = GPTSimpleVectorIndex.load_from_disk(index_path)
-    return index
-
-def display_json(json_select):
-    json_path = f"./index/{json_select}.json"
-    if not os.path.exists(json_path):
-        return None
-    documents = JSONReader().load_data(f"./index/{json_select}.json")
-
-    return documents[0]
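`ask_ai` is the shared query path: pick up the saved index (or the throwaway one built from search results), wrap the UI's template in a `QuestionAnswerPrompt`, and query with a per-call `LLMPredictor`. A sketch of a direct call, assuming an index was saved earlier as `./index/my_docs.json` and that the template uses the `{context_str}`/`{query_str}` placeholders this `llama_index` version substitutes:

```python
# Hypothetical template; llama_index fills in {context_str} and {query_str}.
PROMPT_TMPL = (
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Answer the question using only the context: {query_str}\n"
)

answer = ask_ai(
    api_key="sk-...",
    index_select="my_docs",  # resolved by load_index() to ./index/my_docs.json
    question="What does chapter 2 argue?",
    prompt_tmpl=PROMPT_TMPL,
    sim_k=3,                 # retrieve the 3 most similar chunks
    temperature=0,           # deterministic answers for retrieval QA
)
print(answer)
```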
diff --git a/spaces/Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3/utils.py b/spaces/Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3/utils.py
deleted file mode 100644
index 92e696511242a28a5a929b286f143c1b4d235009..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3/utils.py
+++ /dev/null
@@ -1,263 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    # Copy matching weights; fall back to the freshly initialized value for
-    # any key missing from the checkpoint.
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
-    # Keep a rolling window of checkpoints: delete the one from 3000 steps ago.
-    ckptname = checkpoint_path.split("/")[-1]
-    newest_step = int(ckptname.split(".")[0].split("_")[1])
-    last_ckptname = checkpoint_path.replace(str(newest_step), str(newest_step - 3000))
-    if newest_step >= 3000 and os.path.exists(last_ckptname):
-        os.remove(last_ckptname)
-    logger.info("Saving model and optimizer state at iteration {} to {}".format(
-        iteration, checkpoint_path))
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    torch.save({'model': state_dict,
-                'iteration': iteration,
-                'optimizer': optimizer.state_dict(),
-                'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
-    for k, v in scalars.items():
-        writer.add_scalar(k, v, global_step)
-    for k, v in histograms.items():
-        writer.add_histogram(k, v, global_step)
-    for k, v in images.items():
-        writer.add_image(k, v, global_step, dataformats='HWC')
-    for k, v in audios.items():
-        writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
-    f_list = glob.glob(os.path.join(dir_path, regex))
-    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
-    x = f_list[-1]
-    print(x)
-    return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10, 2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_wav_to_torch(full_path):
-    sampling_rate, data = read(full_path)
-    return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
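`load_checkpoint` merges rather than strictly loads: keys found in the checkpoint overwrite the model's weights and missing keys keep their initialization, so a partially compatible checkpoint still restores. A minimal round-trip under assumed names (the `Linear` stand-in and the `./logs/demo/G_3000.pth` path are illustrative; the filename must match the `G_<step>.pth` pattern `save_checkpoint` parses):

```python
import os
import torch

os.makedirs("./logs/demo", exist_ok=True)

model = torch.nn.Linear(4, 2)   # stand-in for the real synthesizer network
optim = torch.optim.AdamW(model.parameters(), lr=2e-4)

# The step is parsed from the filename (G_<step>.pth); once step >= 3000 the
# checkpoint from 3000 steps earlier is pruned automatically.
save_checkpoint(model, optim, learning_rate=2e-4, iteration=3000,
                checkpoint_path="./logs/demo/G_3000.pth")

model, optim, lr, it = load_checkpoint("./logs/demo/G_3000.pth", model, optim)
assert (lr, it) == (2e-4, 3000)
```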
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
-                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        # First run: copy the chosen config into the model dir so later runs
-        # resume from exactly the same settings.
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)   # recurse so nested JSON objects get attribute access too
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()
diff --git a/spaces/Makiing/coolb-in-gtest/src/app/page.tsx b/spaces/Makiing/coolb-in-gtest/src/app/page.tsx
deleted file mode 100644
index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000
--- a/spaces/Makiing/coolb-in-gtest/src/app/page.tsx
+++ /dev/null
@@ -1,15 +0,0 @@
-import dynamic from 'next/dynamic'
-
-const DynamicComponentWithNoSSR = dynamic(
-  () => import('../components/chat'),
-  { ssr: false }
-)
-
-export default function IndexPage() {
-  return (
-    <>
-      <DynamicComponentWithNoSSR />
-    </>
    - - - ) -} diff --git a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/libs/threejs/three.min.js b/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/libs/threejs/three.min.js deleted file mode 100644 index c6b34a74644c00f08a36b19a13ebeef111e5f9f8..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/pymo/mocapplayer/libs/threejs/three.min.js +++ /dev/null @@ -1,838 +0,0 @@ -// threejs.org/license -(function(l,sa){"object"===typeof exports&&"undefined"!==typeof module?sa(exports):"function"===typeof define&&define.amd?define(["exports"],sa):sa(l.THREE=l.THREE||{})})(this,function(l){function sa(){}function B(a,b){this.x=a||0;this.y=b||0}function da(a,b,c,d,e,f,g,h,k,m){Object.defineProperty(this,"id",{value:ee++});this.uuid=T.generateUUID();this.sourceFile=this.name="";this.image=void 0!==a?a:da.DEFAULT_IMAGE;this.mipmaps=[];this.mapping=void 0!==b?b:da.DEFAULT_MAPPING;this.wrapS=void 0!==c? -c:1001;this.wrapT=void 0!==d?d:1001;this.magFilter=void 0!==e?e:1006;this.minFilter=void 0!==f?f:1008;this.anisotropy=void 0!==k?k:1;this.format=void 0!==g?g:1023;this.type=void 0!==h?h:1009;this.offset=new B(0,0);this.repeat=new B(1,1);this.generateMipmaps=!0;this.premultiplyAlpha=!1;this.flipY=!0;this.unpackAlignment=4;this.encoding=void 0!==m?m:3E3;this.version=0;this.onUpdate=null}function ga(a,b,c,d){this.x=a||0;this.y=b||0;this.z=c||0;this.w=void 0!==d?d:1}function Db(a,b,c){this.uuid=T.generateUUID(); -this.width=a;this.height=b;this.scissor=new ga(0,0,a,b);this.scissorTest=!1;this.viewport=new ga(0,0,a,b);c=c||{};void 0===c.minFilter&&(c.minFilter=1006);this.texture=new da(void 0,void 0,c.wrapS,c.wrapT,c.magFilter,c.minFilter,c.format,c.type,c.anisotropy,c.encoding);this.depthBuffer=void 0!==c.depthBuffer?c.depthBuffer:!0;this.stencilBuffer=void 0!==c.stencilBuffer?c.stencilBuffer:!0;this.depthTexture=void 0!==c.depthTexture?c.depthTexture:null}function Eb(a,b,c){Db.call(this,a,b,c);this.activeMipMapLevel= -this.activeCubeFace=0}function ba(a,b,c,d){this._x=a||0;this._y=b||0;this._z=c||0;this._w=void 0!==d?d:1}function q(a,b,c){this.x=a||0;this.y=b||0;this.z=c||0}function J(){this.elements=new Float32Array([1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]);0= -d||0 0 ) {\nfloat depth = gl_FragCoord.z / gl_FragCoord.w;\nfloat fogFactor = 0.0;\nif ( fogType == 1 ) {\nfogFactor = smoothstep( fogNear, fogFar, depth );\n} else {\nconst float LOG2 = 1.442695;\nfogFactor = exp2( - fogDensity * fogDensity * depth * depth * LOG2 );\nfogFactor = 1.0 - clamp( fogFactor, 0.0, 1.0 );\n}\ngl_FragColor = mix( gl_FragColor, vec4( fogColor, gl_FragColor.w ), fogFactor );\n}\n}"].join("\n")); -y.compileShader(P);y.compileShader(R);y.attachShader(N,P);y.attachShader(N,R);y.linkProgram(N);M=N;u=y.getAttribLocation(M,"position");v=y.getAttribLocation(M,"uv");c=y.getUniformLocation(M,"uvOffset");d=y.getUniformLocation(M,"uvScale");e=y.getUniformLocation(M,"rotation");f=y.getUniformLocation(M,"scale");g=y.getUniformLocation(M,"color");h=y.getUniformLocation(M,"map");k=y.getUniformLocation(M,"opacity");m=y.getUniformLocation(M,"modelViewMatrix");w=y.getUniformLocation(M,"projectionMatrix");n= -y.getUniformLocation(M,"fogType");p=y.getUniformLocation(M,"fogDensity");r=y.getUniformLocation(M,"fogNear");x=y.getUniformLocation(M,"fogFar");l=y.getUniformLocation(M,"fogColor");D=y.getUniformLocation(M,"alphaTest");N=document.createElementNS("http://www.w3.org/1999/xhtml","canvas");N.width=8;N.height=8;P=N.getContext("2d");P.fillStyle="white";P.fillRect(0,0,8,8);ca=new 
da(N);ca.needsUpdate=!0}y.useProgram(M);E.initAttributes();E.enableAttribute(u);E.enableAttribute(v);E.disableUnusedAttributes(); -E.disable(y.CULL_FACE);E.enable(y.BLEND);y.bindBuffer(y.ARRAY_BUFFER,H);y.vertexAttribPointer(u,2,y.FLOAT,!1,16,0);y.vertexAttribPointer(v,2,y.FLOAT,!1,16,8);y.bindBuffer(y.ELEMENT_ARRAY_BUFFER,F);y.uniformMatrix4fv(w,!1,Ka.projectionMatrix.elements);E.activeTexture(y.TEXTURE0);y.uniform1i(h,0);P=N=0;(R=q.fog)?(y.uniform3f(l,R.color.r,R.color.g,R.color.b),R&&R.isFog?(y.uniform1f(r,R.near),y.uniform1f(x,R.far),y.uniform1i(n,1),P=N=1):R&&R.isFogExp2&&(y.uniform1f(p,R.density),y.uniform1i(n,2),P=N=2)): -(y.uniform1i(n,0),P=N=0);for(var R=0,S=b.length;R/g,function(a,c){var d=X[c];if(void 0===d)throw Error("Can not resolve #include <"+c+">");return Cd(d)})}function ve(a){return a.replace(/for \( int i \= (\d+)\; i < (\d+)\; i \+\+ \) \{([\s\S]+?)(?=\})\}/g,function(a,c,d,e){a="";for(c=parseInt(c);cb||a.height>b){var c=b/Math.max(a.width,a.height),d=document.createElementNS("http://www.w3.org/1999/xhtml","canvas");d.width=Math.floor(a.width*c);d.height=Math.floor(a.height*c);d.getContext("2d").drawImage(a,0,0,a.width,a.height,0,0,d.width,d.height);console.warn("THREE.WebGLRenderer: image is too big ("+a.width+"x"+a.height+"). Resized to "+d.width+"x"+d.height,a);return d}return a} -function k(a){return T.isPowerOfTwo(a.width)&&T.isPowerOfTwo(a.height)}function m(b){return 1003===b||1004===b||1005===b?a.NEAREST:a.LINEAR}function w(b){b=b.target;b.removeEventListener("dispose",w);a:{var c=d.get(b);if(b.image&&c.__image__webglTextureCube)a.deleteTexture(c.__image__webglTextureCube);else{if(void 0===c.__webglInit)break a;a.deleteTexture(c.__webglTexture)}d["delete"](b)}q.textures--}function n(b){b=b.target;b.removeEventListener("dispose",n);var c=d.get(b),e=d.get(b.texture);if(b){void 0!== -e.__webglTexture&&a.deleteTexture(e.__webglTexture);b.depthTexture&&b.depthTexture.dispose();if(b&&b.isWebGLRenderTargetCube)for(e=0;6>e;e++)a.deleteFramebuffer(c.__webglFramebuffer[e]),c.__webglDepthbuffer&&a.deleteRenderbuffer(c.__webglDepthbuffer[e]);else a.deleteFramebuffer(c.__webglFramebuffer),c.__webglDepthbuffer&&a.deleteRenderbuffer(c.__webglDepthbuffer);d["delete"](b.texture);d["delete"](b)}q.textures--}function p(b,g){var m=d.get(b);if(0x;x++)l[x]=p||n?n?b.image[x].image:b.image[x]:h(b.image[x],e.maxCubemapSize);var t=k(l[0]),u=f(b.format),ja=f(b.type);r(a.TEXTURE_CUBE_MAP, -b,t);for(x=0;6>x;x++)if(p)for(var B,C=l[x].mipmaps,z=0,N=C.length;zm;m++)e.__webglFramebuffer[m]=a.createFramebuffer()}else e.__webglFramebuffer=a.createFramebuffer();if(g){c.bindTexture(a.TEXTURE_CUBE_MAP,f.__webglTexture);r(a.TEXTURE_CUBE_MAP,b.texture,h);for(m=0;6>m;m++)l(e.__webglFramebuffer[m],b,a.COLOR_ATTACHMENT0,a.TEXTURE_CUBE_MAP_POSITIVE_X+m);b.texture.generateMipmaps&&h&&a.generateMipmap(a.TEXTURE_CUBE_MAP);c.bindTexture(a.TEXTURE_CUBE_MAP, -null)}else c.bindTexture(a.TEXTURE_2D,f.__webglTexture),r(a.TEXTURE_2D,b.texture,h),l(e.__webglFramebuffer,b,a.COLOR_ATTACHMENT0,a.TEXTURE_2D),b.texture.generateMipmaps&&h&&a.generateMipmap(a.TEXTURE_2D),c.bindTexture(a.TEXTURE_2D,null);if(b.depthBuffer){e=d.get(b);f=b&&b.isWebGLRenderTargetCube;if(b.depthTexture){if(f)throw Error("target.depthTexture not supported in Cube render targets");if(b&&b.isWebGLRenderTargetCube)throw Error("Depth Texture with cube render targets is not supported!");a.bindFramebuffer(a.FRAMEBUFFER, -e.__webglFramebuffer);if(!b.depthTexture||!b.depthTexture.isDepthTexture)throw Error("renderTarget.depthTexture must be an instance 
of THREE.DepthTexture");d.get(b.depthTexture).__webglTexture&&b.depthTexture.image.width===b.width&&b.depthTexture.image.height===b.height||(b.depthTexture.image.width=b.width,b.depthTexture.image.height=b.height,b.depthTexture.needsUpdate=!0);p(b.depthTexture,0);e=d.get(b.depthTexture).__webglTexture;if(1026===b.depthTexture.format)a.framebufferTexture2D(a.FRAMEBUFFER, -a.DEPTH_ATTACHMENT,a.TEXTURE_2D,e,0);else if(1027===b.depthTexture.format)a.framebufferTexture2D(a.FRAMEBUFFER,a.DEPTH_STENCIL_ATTACHMENT,a.TEXTURE_2D,e,0);else throw Error("Unknown depthTexture format");}else if(f)for(e.__webglDepthbuffer=[],f=0;6>f;f++)a.bindFramebuffer(a.FRAMEBUFFER,e.__webglFramebuffer[f]),e.__webglDepthbuffer[f]=a.createRenderbuffer(),t(e.__webglDepthbuffer[f],b);else a.bindFramebuffer(a.FRAMEBUFFER,e.__webglFramebuffer),e.__webglDepthbuffer=a.createRenderbuffer(),t(e.__webglDepthbuffer, -b);a.bindFramebuffer(a.FRAMEBUFFER,null)}};this.updateRenderTargetMipmap=function(b){var e=b.texture;e.generateMipmaps&&k(b)&&1003!==e.minFilter&&1006!==e.minFilter&&(b=b&&b.isWebGLRenderTargetCube?a.TEXTURE_CUBE_MAP:a.TEXTURE_2D,e=d.get(e).__webglTexture,c.bindTexture(b,e),a.generateMipmap(b),c.bindTexture(b,null))}}function xf(){var a={};return{get:function(b){b=b.uuid;var c=a[b];void 0===c&&(c={},a[b]=c);return c},"delete":function(b){delete a[b.uuid]},clear:function(){a={}}}}function yf(a,b,c){function d(b, -c,d){var e=new Uint8Array(4),f=a.createTexture();a.bindTexture(b,f);a.texParameteri(b,a.TEXTURE_MIN_FILTER,a.NEAREST);a.texParameteri(b,a.TEXTURE_MAG_FILTER,a.NEAREST);for(b=0;b=ia.maxTextures&&console.warn("WebGLRenderer: trying to use "+a+" texture units while this GPU supports only "+ia.maxTextures);da+=1;return a};this.setTexture2D=function(){var a=!1;return function(b,c){b&&b.isWebGLRenderTarget&&(a||(console.warn("THREE.WebGLRenderer.setTexture2D: don't use render targets as textures. Use their .texture property instead."), -a=!0),b=b.texture);ua.setTexture2D(b,c)}}();this.setTexture=function(){var a=!1;return function(b,c){a||(console.warn("THREE.WebGLRenderer: .setTexture is deprecated, use setTexture2D instead."),a=!0);ua.setTexture2D(b,c)}}();this.setTextureCube=function(){var a=!1;return function(b,c){b&&b.isWebGLRenderTargetCube&&(a||(console.warn("THREE.WebGLRenderer.setTextureCube: don't use cube render targets as textures. 
Use their .texture property instead."),a=!0),b=b.texture);b&&b.isCubeTexture||Array.isArray(b.image)&& -6===b.image.length?ua.setTextureCube(b,c):ua.setTextureCubeDynamic(b,c)}}();this.getCurrentRenderTarget=function(){return V};this.setRenderTarget=function(a){(V=a)&&void 0===ea.get(a).__webglFramebuffer&&ua.setupRenderTarget(a);var b=a&&a.isWebGLRenderTargetCube,c;a?(c=ea.get(a),c=b?c.__webglFramebuffer[a.activeCubeFace]:c.__webglFramebuffer,X.copy(a.scissor),fb=a.scissorTest,$a.copy(a.viewport)):(c=null,X.copy(ha).multiplyScalar(Qa),fb=la,$a.copy(fa).multiplyScalar(Qa));T!==c&&(A.bindFramebuffer(A.FRAMEBUFFER, -c),T=c);Y.scissor(X);Y.setScissorTest(fb);Y.viewport($a);b&&(b=ea.get(a.texture),A.framebufferTexture2D(A.FRAMEBUFFER,A.COLOR_ATTACHMENT0,A.TEXTURE_CUBE_MAP_POSITIVE_X+a.activeCubeFace,b.__webglTexture,a.activeMipMapLevel))};this.readRenderTargetPixels=function(a,b,c,d,e,f){if(!1===(a&&a.isWebGLRenderTarget))console.error("THREE.WebGLRenderer.readRenderTargetPixels: renderTarget is not THREE.WebGLRenderTarget.");else{var g=ea.get(a).__webglFramebuffer;if(g){var h=!1;g!==T&&(A.bindFramebuffer(A.FRAMEBUFFER, -g),h=!0);try{var k=a.texture,m=k.format,n=k.type;1023!==m&&u(m)!==A.getParameter(A.IMPLEMENTATION_COLOR_READ_FORMAT)?console.error("THREE.WebGLRenderer.readRenderTargetPixels: renderTarget is not in RGBA or implementation defined format."):1009===n||u(n)===A.getParameter(A.IMPLEMENTATION_COLOR_READ_TYPE)||1015===n&&(ka.get("OES_texture_float")||ka.get("WEBGL_color_buffer_float"))||1016===n&&ka.get("EXT_color_buffer_half_float")?A.checkFramebufferStatus(A.FRAMEBUFFER)===A.FRAMEBUFFER_COMPLETE?0<=b&& -b<=a.width-d&&0<=c&&c<=a.height-e&&A.readPixels(b,c,d,e,u(m),u(n),f):console.error("THREE.WebGLRenderer.readRenderTargetPixels: readPixels from renderTarget failed. 
Framebuffer not complete."):console.error("THREE.WebGLRenderer.readRenderTargetPixels: renderTarget is not in UnsignedByteType or implementation defined type.")}finally{h&&A.bindFramebuffer(A.FRAMEBUFFER,T)}}}}}function Ib(a,b){this.name="";this.color=new O(a);this.density=void 0!==b?b:2.5E-4}function Jb(a,b,c){this.name="";this.color= -new O(a);this.near=void 0!==b?b:1;this.far=void 0!==c?c:1E3}function jb(){z.call(this);this.type="Scene";this.overrideMaterial=this.fog=this.background=null;this.autoUpdate=!0}function Ed(a,b,c,d,e){z.call(this);this.lensFlares=[];this.positionScreen=new q;this.customUpdateCallback=void 0;void 0!==a&&this.add(a,b,c,d,e)}function kb(a){U.call(this);this.type="SpriteMaterial";this.color=new O(16777215);this.map=null;this.rotation=0;this.lights=this.fog=!1;this.setValues(a)}function qc(a){z.call(this); -this.type="Sprite";this.material=void 0!==a?a:new kb}function rc(){z.call(this);this.type="LOD";Object.defineProperties(this,{levels:{enumerable:!0,value:[]}})}function lb(a,b,c,d,e,f,g,h,k,m,w,n){da.call(this,null,f,g,h,k,m,d,e,w,n);this.image={data:a,width:b,height:c};this.magFilter=void 0!==k?k:1003;this.minFilter=void 0!==m?m:1003;this.flipY=this.generateMipmaps=!1;this.unpackAlignment=1}function bd(a,b,c){this.useVertexTexture=void 0!==c?c:!0;this.identityMatrix=new J;a=a||[];this.bones=a.slice(0); -this.useVertexTexture?(a=Math.sqrt(4*this.bones.length),a=T.nextPowerOfTwo(Math.ceil(a)),this.boneTextureHeight=this.boneTextureWidth=a=Math.max(a,4),this.boneMatrices=new Float32Array(this.boneTextureWidth*this.boneTextureHeight*4),this.boneTexture=new lb(this.boneMatrices,this.boneTextureWidth,this.boneTextureHeight,1023,1015)):this.boneMatrices=new Float32Array(16*this.bones.length);if(void 0===b)this.calculateInverses();else if(this.bones.length===b.length)this.boneInverses=b.slice(0);else for(console.warn("THREE.Skeleton bonInverses is the wrong length."), -this.boneInverses=[],b=0,a=this.bones.length;b=a.HAVE_CURRENT_DATA&&(w.needsUpdate=!0)}da.call(this,a,b,c,d,e,f,g,h,k);this.generateMipmaps=!1;var w=this;m()}function Lb(a,b,c,d,e,f,g,h,k,m,w,n){da.call(this,null,f,g,h,k,m,d,e,w,n);this.image={width:b,height:c};this.mipmaps=a;this.generateMipmaps=this.flipY=!1}function fd(a,b,c,d,e,f,g,h,k){da.call(this,a,b,c,d,e,f,g,h,k);this.needsUpdate=!0}function tc(a,b,c,d,e,f,g, -h,k,m){m=void 0!==m?m:1026;if(1026!==m&&1027!==m)throw Error("DepthTexture format must be either THREE.DepthFormat or THREE.DepthStencilFormat");da.call(this,null,d,e,f,g,h,m,c,k);this.image={width:a,height:b};this.type=void 0!==c?c:1012;this.magFilter=void 0!==g?g:1003;this.minFilter=void 0!==h?h:1003;this.generateMipmaps=this.flipY=!1}function Mb(a){function b(a,b){return a-b}G.call(this);var c=[0,0],d={},e=["a","b","c"];if(a&&a.isGeometry){var f=a.vertices,g=a.faces,h=0,k=new Uint32Array(6*g.length); -a=0;for(var m=g.length;an;n++){c[0]=w[e[n]];c[1]=w[e[(n+1)%3]];c.sort(b);var p=c.toString();void 0===d[p]&&(k[2*h]=c[0],k[2*h+1]=c[1],d[p]=!0,h++)}c=new Float32Array(6*h);a=0;for(m=h;an;n++)d=f[k[2*a+n]],h=6*a+3*n,c[h+0]=d.x,c[h+1]=d.y,c[h+2]=d.z;this.addAttribute("position",new C(c,3))}else if(a&&a.isBufferGeometry){if(null!==a.index){m=a.index.array;f=a.attributes.position;e=a.groups;h=0;0===e.length&&a.addGroup(0,m.length);k=new Uint32Array(2*m.length); -g=0;for(w=e.length;gn;n++)c[0]=m[a+n],c[1]=m[a+(n+1)%3],c.sort(b),p=c.toString(),void 0===d[p]&&(k[2*h]=c[0],k[2*h+1]=c[1],d[p]=!0,h++)}c=new 
Float32Array(6*h);a=0;for(m=h;an;n++)h=6*a+3*n,d=k[2*a+n],c[h+0]=f.getX(d),c[h+1]=f.getY(d),c[h+2]=f.getZ(d)}else for(f=a.attributes.position.array,h=f.length/3,k=h/3,c=new Float32Array(6*h),a=0,m=k;an;n++)h=18*a+6*n,k=9*a+3*n,c[h+0]=f[k],c[h+1]=f[k+1], -c[h+2]=f[k+2],d=9*a+(n+1)%3*3,c[h+3]=f[d],c[h+4]=f[d+1],c[h+5]=f[d+2];this.addAttribute("position",new C(c,3))}}function Nb(a,b,c){G.call(this);this.type="ParametricBufferGeometry";this.parameters={func:a,slices:b,stacks:c};var d=[],e=[],f,g,h,k,m,w=b+1;for(f=0;f<=c;f++)for(m=f/c,g=0;g<=b;g++)k=g/b,h=a(k,m),d.push(h.x,h.y,h.z),e.push(k,m);a=[];var n;for(f=0;fd&&1===a.x&&(k[b]=a.x-1);0===c.x&&0===c.z&&(k[b]=d/2/Math.PI+.5)}G.call(this);this.type="PolyhedronBufferGeometry";this.parameters= -{vertices:a,indices:b,radius:c,detail:d};c=c||1;var h=[],k=[];(function(a){for(var c=new q,d=new q,g=new q,h=0;he&&(.2>b&&(k[a+0]+=1),.2>c&&(k[a+2]+=1),.2>d&&(k[a+4]+=1))})();this.addAttribute("position",ha(h,3));this.addAttribute("normal",ha(h.slice(),3));this.addAttribute("uv",ha(k,2));this.normalizeNormals();this.boundingSphere=new Ca(new q, -c)}function Ob(a,b){ua.call(this,[1,1,1,-1,-1,1,-1,1,-1,1,-1,-1],[2,1,0,0,3,2,1,3,0,2,3,1],a,b);this.type="TetrahedronBufferGeometry";this.parameters={radius:a,detail:b}}function vc(a,b){Q.call(this);this.type="TetrahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Ob(a,b));this.mergeVertices()}function Pb(a,b){ua.call(this,[1,0,0,-1,0,0,0,1,0,0,-1,0,0,0,1,0,0,-1],[0,2,4,0,4,3,0,3,5,0,5,2,1,2,5,1,5,3,1,3,4,1,4,2],a,b);this.type="OctahedronBufferGeometry";this.parameters= -{radius:a,detail:b}}function wc(a,b){Q.call(this);this.type="OctahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Pb(a,b));this.mergeVertices()}function Qb(a,b){var c=(1+Math.sqrt(5))/2;ua.call(this,[-1,c,0,1,c,0,-1,-c,0,1,-c,0,0,-1,c,0,1,c,0,-1,-c,0,1,-c,c,0,-1,c,0,1,-c,0,-1,-c,0,1],[0,11,5,0,5,1,0,1,7,0,7,10,0,10,11,1,5,9,5,11,4,11,10,2,10,7,6,7,1,8,3,9,4,3,4,2,3,2,6,3,6,8,3,8,9,4,9,5,2,4,11,6,2,10,8,6,7,9,8,1],a,b);this.type="IcosahedronBufferGeometry";this.parameters= -{radius:a,detail:b}}function xc(a,b){Q.call(this);this.type="IcosahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Qb(a,b));this.mergeVertices()}function Rb(a,b){var c=(1+Math.sqrt(5))/2,d=1/c;ua.call(this,[-1,-1,-1,-1,-1,1,-1,1,-1,-1,1,1,1,-1,-1,1,-1,1,1,1,-1,1,1,1,0,-d,-c,0,-d,c,0,d,-c,0,d,c,-d,-c,0,-d,c,0,d,-c,0,d,c,0,-c,0,-d,c,0,-d,-c,0,d,c,0,d],[3,11,7,3,7,15,3,15,13,7,19,17,7,17,6,7,6,15,17,4,8,17,8,10,17,10,6,8,0,16,8,16,2,8,2,10,0,12,1,0,1,18,0,18,16,6,10,2, -6,2,13,6,13,15,2,16,18,2,18,3,2,3,13,18,1,9,18,9,11,18,11,3,4,14,12,4,12,0,4,0,8,11,9,5,11,5,19,11,19,7,19,5,14,19,14,4,19,4,17,1,12,14,1,14,5,1,5,9],a,b);this.type="DodecahedronBufferGeometry";this.parameters={radius:a,detail:b}}function yc(a,b){Q.call(this);this.type="DodecahedronGeometry";this.parameters={radius:a,detail:b};this.fromBufferGeometry(new Rb(a,b));this.mergeVertices()}function zc(a,b,c,d){Q.call(this);this.type="PolyhedronGeometry";this.parameters={vertices:a,indices:b,radius:c,detail:d}; -this.fromBufferGeometry(new ua(a,b,c,d));this.mergeVertices()}function Sb(a,b,c,d,e){function f(e){var f=a.getPointAt(e/b),m=g.normals[e];e=g.binormals[e];for(n=0;n<=d;n++){var 
w=n/d*Math.PI*2,l=Math.sin(w),w=-Math.cos(w);k.x=w*m.x+l*e.x;k.y=w*m.y+l*e.y;k.z=w*m.z+l*e.z;k.normalize();r.push(k.x,k.y,k.z);h.x=f.x+c*k.x;h.y=f.y+c*k.y;h.z=f.z+c*k.z;p.push(h.x,h.y,h.z)}}G.call(this);this.type="TubeBufferGeometry";this.parameters={path:a,tubularSegments:b,radius:c,radialSegments:d,closed:e};b=b||64;c=c|| -1;d=d||8;e=e||!1;var g=a.computeFrenetFrames(b,e);this.tangents=g.tangents;this.normals=g.normals;this.binormals=g.binormals;var h=new q,k=new q,m=new B,w,n,p=[],r=[],l=[],t=[];for(w=0;wp;p++){e[0]=n[g[p]];e[1]=n[g[(p+1)%3]];e.sort(c);var l=e.toString();void 0===f[l]?f[l]={vert1:e[0],vert2:e[1],face1:m,face2:void 0}:f[l].face2=m}e=[];for(l in f)if(g=f[l],void 0===g.face2||h[g.face1].normal.dot(h[g.face2].normal)<=d)m=k[g.vert1],e.push(m.x),e.push(m.y),e.push(m.z),m=k[g.vert2],e.push(m.x),e.push(m.y),e.push(m.z);this.addAttribute("position",new C(new Float32Array(e),3))}function Ua(a, -b,c,d,e,f,g,h){function k(c){var e,f,k,n=new B,p=new q,l=0,w=!0===c?a:b,I=!0===c?1:-1;f=u;for(e=1;e<=d;e++)x.setXYZ(u,0,y*I,0),t.setXYZ(u,0,I,0),n.x=.5,n.y=.5,D.setXY(u,n.x,n.y),u++;k=u;for(e=0;e<=d;e++){var z=e/d*h+g,C=Math.cos(z),z=Math.sin(z);p.x=w*z;p.y=y*I;p.z=w*C;x.setXYZ(u,p.x,p.y,p.z);t.setXYZ(u,0,I,0);n.x=.5*C+.5;n.y=.5*z*I+.5;D.setXY(u,n.x,n.y);u++}for(e=0;ethis.duration&&this.resetDuration();this.optimize()}function ud(a){this.manager=void 0!==a?a:Ga;this.textures={}}function Id(a){this.manager=void 0!==a?a:Ga}function wb(){this.onLoadStart=function(){};this.onLoadProgress=function(){};this.onLoadComplete=function(){}}function Jd(a){"boolean"=== -typeof a&&(console.warn("THREE.JSONLoader: showStatus parameter has been removed from constructor."),a=void 0);this.manager=void 0!==a?a:Ga;this.withCredentials=!1}function xe(a){this.manager=void 0!==a?a:Ga;this.texturePath=""}function ia(){}function Sa(a,b){this.v1=a;this.v2=b}function Oc(){this.curves=[];this.autoClose=!1}function Va(a,b,c,d,e,f,g,h){this.aX=a;this.aY=b;this.xRadius=c;this.yRadius=d;this.aStartAngle=e;this.aEndAngle=f;this.aClockwise=g;this.aRotation=h||0}function xb(a){this.points= -void 0===a?[]:a}function yb(a,b,c,d){this.v0=a;this.v1=b;this.v2=c;this.v3=d}function zb(a,b,c){this.v0=a;this.v1=b;this.v2=c}function Ab(){Pc.apply(this,arguments);this.holes=[]}function Pc(a){Oc.call(this);this.currentPoint=new B;a&&this.fromPoints(a)}function Kd(){this.subPaths=[];this.currentPath=null}function Ld(a){this.data=a}function ye(a){this.manager=void 0!==a?a:Ga}function Md(){void 0===Nd&&(Nd=new (window.AudioContext||window.webkitAudioContext));return Nd}function Od(a){this.manager= -void 0!==a?a:Ga}function ze(){this.type="StereoCamera";this.aspect=1;this.eyeSep=.064;this.cameraL=new Ea;this.cameraL.layers.enable(1);this.cameraL.matrixAutoUpdate=!1;this.cameraR=new Ea;this.cameraR.layers.enable(2);this.cameraR.matrixAutoUpdate=!1}function vd(a,b,c){z.call(this);this.type="CubeCamera";var d=new Ea(90,1,a,b);d.up.set(0,-1,0);d.lookAt(new q(1,0,0));this.add(d);var e=new Ea(90,1,a,b);e.up.set(0,-1,0);e.lookAt(new q(-1,0,0));this.add(e);var f=new Ea(90,1,a,b);f.up.set(0,0,1);f.lookAt(new q(0, -1,0));this.add(f);var g=new Ea(90,1,a,b);g.up.set(0,0,-1);g.lookAt(new q(0,-1,0));this.add(g);var h=new Ea(90,1,a,b);h.up.set(0,-1,0);h.lookAt(new q(0,0,1));this.add(h);var k=new Ea(90,1,a,b);k.up.set(0,-1,0);k.lookAt(new q(0,0,-1));this.add(k);this.renderTarget=new Eb(c,c,{format:1022,magFilter:1006,minFilter:1006});this.updateCubeMap=function(a,b){null===this.parent&&this.updateMatrixWorld();var 
c=this.renderTarget,p=c.texture.generateMipmaps;c.texture.generateMipmaps=!1;c.activeCubeFace=0;a.render(b, -d,c);c.activeCubeFace=1;a.render(b,e,c);c.activeCubeFace=2;a.render(b,f,c);c.activeCubeFace=3;a.render(b,g,c);c.activeCubeFace=4;a.render(b,h,c);c.texture.generateMipmaps=p;c.activeCubeFace=5;a.render(b,k,c);a.setRenderTarget(null)}}function Pd(){z.call(this);this.type="AudioListener";this.context=Md();this.gain=this.context.createGain();this.gain.connect(this.context.destination);this.filter=null}function dc(a){z.call(this);this.type="Audio";this.context=a.context;this.source=this.context.createBufferSource(); -this.source.onended=this.onEnded.bind(this);this.gain=this.context.createGain();this.gain.connect(a.getInput());this.autoplay=!1;this.startTime=0;this.playbackRate=1;this.isPlaying=!1;this.hasPlaybackControl=!0;this.sourceType="empty";this.filters=[]}function Qd(a){dc.call(this,a);this.panner=this.context.createPanner();this.panner.connect(this.gain)}function Rd(a,b){this.analyser=a.context.createAnalyser();this.analyser.fftSize=void 0!==b?b:2048;this.data=new Uint8Array(this.analyser.frequencyBinCount); -a.getOutput().connect(this.analyser)}function wd(a,b,c){this.binding=a;this.valueSize=c;a=Float64Array;switch(b){case "quaternion":b=this._slerp;break;case "string":case "bool":a=Array;b=this._select;break;default:b=this._lerp}this.buffer=new a(4*c);this._mixBufferRegion=b;this.referenceCount=this.useCount=this.cumulativeWeight=0}function fa(a,b,c){this.path=b;this.parsedPath=c||fa.parseTrackName(b);this.node=fa.findNode(a,this.parsedPath.nodeName)||a;this.rootNode=a}function Sd(a){this.uuid=T.generateUUID(); -this._objects=Array.prototype.slice.call(arguments);this.nCachedObjects_=0;var b={};this._indicesByUUID=b;for(var c=0,d=arguments.length;c!==d;++c)b[arguments[c].uuid]=c;this._paths=[];this._parsedPaths=[];this._bindings=[];this._bindingsIndicesByPath={};var e=this;this.stats={objects:{get total(){return e._objects.length},get inUse(){return this.total-e.nCachedObjects_}},get bindingsPerObject(){return e._bindings.length}}}function Td(a,b,c){this._mixer=a;this._clip=b;this._localRoot=c||null;a=b.tracks; -b=a.length;c=Array(b);for(var d={endingStart:2400,endingEnd:2400},e=0;e!==b;++e){var f=a[e].createInterpolant(null);c[e]=f;f.settings=d}this._interpolantSettings=d;this._interpolants=c;this._propertyBindings=Array(b);this._weightInterpolant=this._timeScaleInterpolant=this._byClipCacheIndex=this._cacheIndex=null;this.loop=2201;this._loopCount=-1;this._startTime=null;this.time=0;this._effectiveWeight=this.weight=this._effectiveTimeScale=this.timeScale=1;this.repetitions=Infinity;this.paused=!1;this.enabled= -!0;this.clampWhenFinished=!1;this.zeroSlopeAtEnd=this.zeroSlopeAtStart=!0}function Ud(a){this._root=a;this._initMemoryManager();this.time=this._accuIndex=0;this.timeScale=1}function Ae(a,b){"string"===typeof a&&(console.warn("THREE.Uniform: Type parameter is no longer needed."),a=b);this.value=a}function Bb(){G.call(this);this.type="InstancedBufferGeometry";this.maxInstancedCount=void 0}function Vd(a,b,c,d){this.uuid=T.generateUUID();this.data=a;this.itemSize=b;this.offset=c;this.normalized=!0=== -d}function ec(a,b){this.uuid=T.generateUUID();this.array=a;this.stride=b;this.count=void 0!==a?a.length/b:0;this.dynamic=!1;this.updateRange={offset:0,count:-1};this.version=0}function fc(a,b,c){ec.call(this,a,b);this.meshPerAttribute=c||1}function gc(a,b,c){C.call(this,a,b);this.meshPerAttribute=c||1}function Wd(a,b,c,d){this.ray=new 
ab(a,b);this.near=c||0;this.far=d||Infinity;this.params={Mesh:{},Line:{},LOD:{},Points:{threshold:1},Sprite:{}};Object.defineProperties(this.params,{PointCloud:{get:function(){console.warn("THREE.Raycaster: params.PointCloud has been renamed to params.Points."); -return this.Points}}})}function Be(a,b){return a.distance-b.distance}function Xd(a,b,c,d){if(!1!==a.visible&&(a.raycast(b,c),!0===d)){a=a.children;d=0;for(var e=a.length;dc;c++,d++){var e=c/32*Math.PI*2,f=d/32*Math.PI*2;b.push(Math.cos(e),Math.sin(e),1,Math.cos(f),Math.sin(f),1)}a.addAttribute("position",new ha(b,3));b=new oa({fog:!1});this.cone=new la(a,b);this.add(this.cone);this.update()}function ic(a){this.bones=this.getBoneList(a);for(var b=new Q, -c=0;cd;d++)c.faces[d].color=this.colors[4>d?0:1];d=new Ma({vertexColors:1,wireframe:!0});this.lightSphere=new ya(c,d);this.add(this.lightSphere);this.update()}function Sc(a,b,c,d){b=b||1;c=new O(void 0!==c?c:4473924);d=new O(void 0!== -d?d:8947848);for(var e=b/2,f=2*a/b,g=[],h=[],k=0,m=0,l=-a;k<=b;k++,l+=f){g.push(-a,0,l,a,0,l);g.push(l,0,-a,l,0,a);var n=k===e?c:d;n.toArray(h,m);m+=3;n.toArray(h,m);m+=3;n.toArray(h,m);m+=3;n.toArray(h,m);m+=3}a=new G;a.addAttribute("position",new ha(g,3));a.addAttribute("color",new ha(h,3));g=new oa({vertexColors:2});la.call(this,a,g)}function Tc(a,b,c,d){this.object=a;this.size=void 0!==b?b:1;a=void 0!==c?c:16776960;d=void 0!==d?d:1;b=0;(c=this.object.geometry)&&c.isGeometry?b=c.faces.length:console.warn("THREE.FaceNormalsHelper: only THREE.Geometry is supported. Use THREE.VertexNormalsHelper, instead."); -c=new G;b=new ha(6*b,3);c.addAttribute("position",b);la.call(this,c,new oa({color:a,linewidth:d}));this.matrixAutoUpdate=!1;this.update()}function lc(a,b){z.call(this);this.light=a;this.light.updateMatrixWorld();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;void 0===b&&(b=1);var c=new G;c.addAttribute("position",new ha([-b,b,0,b,b,0,b,-b,0,-b,-b,0,-b,b,0],3));var d=new oa({fog:!1});this.add(new Ta(c,d));c=new G;c.addAttribute("position",new ha([0,0,0,0,0,1],3));this.add(new Ta(c,d));this.update()} -function Uc(a){function b(a,b,d){c(a,d);c(b,d)}function c(a,b){d.vertices.push(new q);d.colors.push(new O(b));void 0===f[a]&&(f[a]=[]);f[a].push(d.vertices.length-1)}var d=new Q,e=new oa({color:16777215,vertexColors:1}),f={};b("n1","n2",16755200);b("n2","n4",16755200);b("n4","n3",16755200);b("n3","n1",16755200);b("f1","f2",16755200);b("f2","f4",16755200);b("f4","f3",16755200);b("f3","f1",16755200);b("n1","f1",16755200);b("n2","f2",16755200);b("n3","f3",16755200);b("n4","f4",16755200);b("p","n1",16711680); -b("p","n2",16711680);b("p","n3",16711680);b("p","n4",16711680);b("u1","u2",43775);b("u2","u3",43775);b("u3","u1",43775);b("c","t",16777215);b("p","c",3355443);b("cn1","cn2",3355443);b("cn3","cn4",3355443);b("cf1","cf2",3355443);b("cf3","cf4",3355443);la.call(this,d,e);this.camera=a;this.camera.updateProjectionMatrix&&this.camera.updateProjectionMatrix();this.matrix=a.matrixWorld;this.matrixAutoUpdate=!1;this.pointMap=f;this.update()}function Vc(a,b){var c=void 0!==b?b:8947848;this.object=a;this.box= -new Ba;ya.call(this,new ob(1,1,1),new Ma({color:c,wireframe:!0}))}function Wc(a,b){void 0===b&&(b=16776960);var c=new Uint16Array([0,1,1,2,2,3,3,0,4,5,5,6,6,7,7,4,0,4,1,5,2,6,3,7]),d=new Float32Array(24),e=new G;e.setIndex(new C(c,1));e.addAttribute("position",new C(d,3));la.call(this,e,new oa({color:b}));void 0!==a&&this.update(a)}function Cb(a,b,c,d,e,f){z.call(this);void 0===d&&(d=16776960);void 0===c&&(c=1);void 0===e&&(e=.2*c);void 
0===f&&(f=.2*e);this.position.copy(b);this.line=new Ta(Ce,new oa({color:d})); -this.line.matrixAutoUpdate=!1;this.add(this.line);this.cone=new ya(De,new Ma({color:d}));this.cone.matrixAutoUpdate=!1;this.add(this.cone);this.setDirection(a);this.setLength(c,e,f)}function xd(a){a=a||1;var b=new Float32Array([0,0,0,a,0,0,0,0,0,0,a,0,0,0,0,0,0,a]),c=new Float32Array([1,0,0,1,.6,0,0,1,0,.6,1,0,0,0,1,0,.6,1]);a=new G;a.addAttribute("position",new C(b,3));a.addAttribute("color",new C(c,3));b=new oa({vertexColors:2});la.call(this,a,b)}function Ee(a){console.warn("THREE.ClosedSplineCurve3 has been deprecated. Please use THREE.CatmullRomCurve3."); -$d.call(this,a);this.type="catmullrom";this.closed=!0}function yd(a,b,c,d,e,f){Va.call(this,a,b,c,c,d,e,f)}void 0===Number.EPSILON&&(Number.EPSILON=Math.pow(2,-52));void 0===Math.sign&&(Math.sign=function(a){return 0>a?-1:0e;e++)8===e||13===e||18===e||23===e?b[e]="-":14===e?b[e]="4":(2>=c&&(c=33554432+16777216*Math.random()|0),d=c&15,c>>=4,b[e]=a[19===e?d&3|8:d]);return b.join("")}}(),clamp:function(a,b,c){return Math.max(b,Math.min(c,a))},euclideanModulo:function(a,b){return(a%b+b)%b},mapLinear:function(a,b,c,d,e){return d+(a-b)*(e-d)/(c-b)},lerp:function(a,b,c){return(1-c)*a+c*b},smoothstep:function(a, -b,c){if(a<=b)return 0;if(a>=c)return 1;a=(a-b)/(c-b);return a*a*(3-2*a)},smootherstep:function(a,b,c){if(a<=b)return 0;if(a>=c)return 1;a=(a-b)/(c-b);return a*a*a*(a*(6*a-15)+10)},random16:function(){console.warn("THREE.Math.random16() has been deprecated. Use Math.random() instead.");return Math.random()},randInt:function(a,b){return a+Math.floor(Math.random()*(b-a+1))},randFloat:function(a,b){return a+Math.random()*(b-a)},randFloatSpread:function(a){return a*(.5-Math.random())},degToRad:function(a){return a* -T.DEG2RAD},radToDeg:function(a){return a*T.RAD2DEG},isPowerOfTwo:function(a){return 0===(a&a-1)&&0!==a},nearestPowerOfTwo:function(a){return Math.pow(2,Math.round(Math.log(a)/Math.LN2))},nextPowerOfTwo:function(a){a--;a|=a>>1;a|=a>>2;a|=a>>4;a|=a>>8;a|=a>>16;a++;return a}};B.prototype={constructor:B,isVector2:!0,get width(){return this.x},set width(a){this.x=a},get height(){return this.y},set height(a){this.y=a},set:function(a,b){this.x=a;this.y=b;return this},setScalar:function(a){this.y=this.x= -a;return this},setX:function(a){this.x=a;return this},setY:function(a){this.y=a;return this},setComponent:function(a,b){switch(a){case 0:this.x=b;break;case 1:this.y=b;break;default:throw Error("index is out of range: "+a);}return this},getComponent:function(a){switch(a){case 0:return this.x;case 1:return this.y;default:throw Error("index is out of range: "+a);}},clone:function(){return new this.constructor(this.x,this.y)},copy:function(a){this.x=a.x;this.y=a.y;return this},add:function(a,b){if(void 0!== -b)return console.warn("THREE.Vector2: .add() now only accepts one argument. Use .addVectors( a, b ) instead."),this.addVectors(a,b);this.x+=a.x;this.y+=a.y;return this},addScalar:function(a){this.x+=a;this.y+=a;return this},addVectors:function(a,b){this.x=a.x+b.x;this.y=a.y+b.y;return this},addScaledVector:function(a,b){this.x+=a.x*b;this.y+=a.y*b;return this},sub:function(a,b){if(void 0!==b)return console.warn("THREE.Vector2: .sub() now only accepts one argument. 
Use .subVectors( a, b ) instead."), -this.subVectors(a,b);this.x-=a.x;this.y-=a.y;return this},subScalar:function(a){this.x-=a;this.y-=a;return this},subVectors:function(a,b){this.x=a.x-b.x;this.y=a.y-b.y;return this},multiply:function(a){this.x*=a.x;this.y*=a.y;return this},multiplyScalar:function(a){isFinite(a)?(this.x*=a,this.y*=a):this.y=this.x=0;return this},divide:function(a){this.x/=a.x;this.y/=a.y;return this},divideScalar:function(a){return this.multiplyScalar(1/a)},min:function(a){this.x=Math.min(this.x,a.x);this.y=Math.min(this.y, -a.y);return this},max:function(a){this.x=Math.max(this.x,a.x);this.y=Math.max(this.y,a.y);return this},clamp:function(a,b){this.x=Math.max(a.x,Math.min(b.x,this.x));this.y=Math.max(a.y,Math.min(b.y,this.y));return this},clampScalar:function(){var a,b;return function(c,d){void 0===a&&(a=new B,b=new B);a.set(c,c);b.set(d,d);return this.clamp(a,b)}}(),clampLength:function(a,b){var c=this.length();return this.multiplyScalar(Math.max(a,Math.min(b,c))/c)},floor:function(){this.x=Math.floor(this.x);this.y= -Math.floor(this.y);return this},ceil:function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);return this},round:function(){this.x=Math.round(this.x);this.y=Math.round(this.y);return this},roundToZero:function(){this.x=0>this.x?Math.ceil(this.x):Math.floor(this.x);this.y=0>this.y?Math.ceil(this.y):Math.floor(this.y);return this},negate:function(){this.x=-this.x;this.y=-this.y;return this},dot:function(a){return this.x*a.x+this.y*a.y},lengthSq:function(){return this.x*this.x+this.y*this.y},length:function(){return Math.sqrt(this.x* -this.x+this.y*this.y)},lengthManhattan:function(){return Math.abs(this.x)+Math.abs(this.y)},normalize:function(){return this.divideScalar(this.length())},angle:function(){var a=Math.atan2(this.y,this.x);0>a&&(a+=2*Math.PI);return a},distanceTo:function(a){return Math.sqrt(this.distanceToSquared(a))},distanceToSquared:function(a){var b=this.x-a.x;a=this.y-a.y;return b*b+a*a},distanceToManhattan:function(a){return Math.abs(this.x-a.x)+Math.abs(this.y-a.y)},setLength:function(a){return this.multiplyScalar(a/ -this.length())},lerp:function(a,b){this.x+=(a.x-this.x)*b;this.y+=(a.y-this.y)*b;return this},lerpVectors:function(a,b,c){return this.subVectors(b,a).multiplyScalar(c).add(a)},equals:function(a){return a.x===this.x&&a.y===this.y},fromArray:function(a,b){void 0===b&&(b=0);this.x=a[b];this.y=a[b+1];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.x;a[b+1]=this.y;return a},fromAttribute:function(a,b,c){void 0===c&&(c=0);b=b*a.itemSize+c;this.x=a.array[b];this.y=a.array[b+ -1];return this},rotateAround:function(a,b){var c=Math.cos(b),d=Math.sin(b),e=this.x-a.x,f=this.y-a.y;this.x=e*c-f*d+a.x;this.y=e*d+f*c+a.y;return this}};da.DEFAULT_IMAGE=void 0;da.DEFAULT_MAPPING=300;da.prototype={constructor:da,isTexture:!0,set needsUpdate(a){!0===a&&this.version++},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.image=a.image;this.mipmaps=a.mipmaps.slice(0);this.mapping=a.mapping;this.wrapS=a.wrapS;this.wrapT=a.wrapT;this.magFilter=a.magFilter;this.minFilter= -a.minFilter;this.anisotropy=a.anisotropy;this.format=a.format;this.type=a.type;this.offset.copy(a.offset);this.repeat.copy(a.repeat);this.generateMipmaps=a.generateMipmaps;this.premultiplyAlpha=a.premultiplyAlpha;this.flipY=a.flipY;this.unpackAlignment=a.unpackAlignment;this.encoding=a.encoding;return this},toJSON:function(a){if(void 0!==a.textures[this.uuid])return a.textures[this.uuid];var 
b={metadata:{version:4.4,type:"Texture",generator:"Texture.toJSON"},uuid:this.uuid,name:this.name,mapping:this.mapping, -repeat:[this.repeat.x,this.repeat.y],offset:[this.offset.x,this.offset.y],wrap:[this.wrapS,this.wrapT],minFilter:this.minFilter,magFilter:this.magFilter,anisotropy:this.anisotropy,flipY:this.flipY};if(void 0!==this.image){var c=this.image;void 0===c.uuid&&(c.uuid=T.generateUUID());if(void 0===a.images[c.uuid]){var d=a.images,e=c.uuid,f=c.uuid,g;void 0!==c.toDataURL?g=c:(g=document.createElementNS("http://www.w3.org/1999/xhtml","canvas"),g.width=c.width,g.height=c.height,g.getContext("2d").drawImage(c, -0,0,c.width,c.height));g=2048a.x||1a.x?0:1;break;case 1002:a.x=1===Math.abs(Math.floor(a.x)%2)?Math.ceil(a.x)-a.x:a.x-Math.floor(a.x)}if(0> -a.y||1a.y?0:1;break;case 1002:a.y=1===Math.abs(Math.floor(a.y)%2)?Math.ceil(a.y)-a.y:a.y-Math.floor(a.y)}this.flipY&&(a.y=1-a.y)}}};Object.assign(da.prototype,sa.prototype);var ee=0;ga.prototype={constructor:ga,isVector4:!0,set:function(a,b,c,d){this.x=a;this.y=b;this.z=c;this.w=d;return this},setScalar:function(a){this.w=this.z=this.y=this.x=a;return this},setX:function(a){this.x=a;return this},setY:function(a){this.y=a; -return this},setZ:function(a){this.z=a;return this},setW:function(a){this.w=a;return this},setComponent:function(a,b){switch(a){case 0:this.x=b;break;case 1:this.y=b;break;case 2:this.z=b;break;case 3:this.w=b;break;default:throw Error("index is out of range: "+a);}return this},getComponent:function(a){switch(a){case 0:return this.x;case 1:return this.y;case 2:return this.z;case 3:return this.w;default:throw Error("index is out of range: "+a);}},clone:function(){return new this.constructor(this.x, -this.y,this.z,this.w)},copy:function(a){this.x=a.x;this.y=a.y;this.z=a.z;this.w=void 0!==a.w?a.w:1;return this},add:function(a,b){if(void 0!==b)return console.warn("THREE.Vector4: .add() now only accepts one argument. Use .addVectors( a, b ) instead."),this.addVectors(a,b);this.x+=a.x;this.y+=a.y;this.z+=a.z;this.w+=a.w;return this},addScalar:function(a){this.x+=a;this.y+=a;this.z+=a;this.w+=a;return this},addVectors:function(a,b){this.x=a.x+b.x;this.y=a.y+b.y;this.z=a.z+b.z;this.w=a.w+b.w;return this}, -addScaledVector:function(a,b){this.x+=a.x*b;this.y+=a.y*b;this.z+=a.z*b;this.w+=a.w*b;return this},sub:function(a,b){if(void 0!==b)return console.warn("THREE.Vector4: .sub() now only accepts one argument. Use .subVectors( a, b ) instead."),this.subVectors(a,b);this.x-=a.x;this.y-=a.y;this.z-=a.z;this.w-=a.w;return this},subScalar:function(a){this.x-=a;this.y-=a;this.z-=a;this.w-=a;return this},subVectors:function(a,b){this.x=a.x-b.x;this.y=a.y-b.y;this.z=a.z-b.z;this.w=a.w-b.w;return this},multiplyScalar:function(a){isFinite(a)? 
-(this.x*=a,this.y*=a,this.z*=a,this.w*=a):this.w=this.z=this.y=this.x=0;return this},applyMatrix4:function(a){var b=this.x,c=this.y,d=this.z,e=this.w;a=a.elements;this.x=a[0]*b+a[4]*c+a[8]*d+a[12]*e;this.y=a[1]*b+a[5]*c+a[9]*d+a[13]*e;this.z=a[2]*b+a[6]*c+a[10]*d+a[14]*e;this.w=a[3]*b+a[7]*c+a[11]*d+a[15]*e;return this},divideScalar:function(a){return this.multiplyScalar(1/a)},setAxisAngleFromQuaternion:function(a){this.w=2*Math.acos(a.w);var b=Math.sqrt(1-a.w*a.w);1E-4>b?(this.x=1,this.z=this.y= -0):(this.x=a.x/b,this.y=a.y/b,this.z=a.z/b);return this},setAxisAngleFromRotationMatrix:function(a){var b,c,d;a=a.elements;var e=a[0];d=a[4];var f=a[8],g=a[1],h=a[5],k=a[9];c=a[2];b=a[6];var m=a[10];if(.01>Math.abs(d-g)&&.01>Math.abs(f-c)&&.01>Math.abs(k-b)){if(.1>Math.abs(d+g)&&.1>Math.abs(f+c)&&.1>Math.abs(k+b)&&.1>Math.abs(e+h+m-3))return this.set(1,0,0,0),this;a=Math.PI;e=(e+1)/2;h=(h+1)/2;m=(m+1)/2;d=(d+g)/4;f=(f+c)/4;k=(k+b)/4;e>h&&e>m?.01>e?(b=0,d=c=.707106781):(b=Math.sqrt(e),c=d/b,d=f/b): -h>m?.01>h?(b=.707106781,c=0,d=.707106781):(c=Math.sqrt(h),b=d/c,d=k/c):.01>m?(c=b=.707106781,d=0):(d=Math.sqrt(m),b=f/d,c=k/d);this.set(b,c,d,a);return this}a=Math.sqrt((b-k)*(b-k)+(f-c)*(f-c)+(g-d)*(g-d));.001>Math.abs(a)&&(a=1);this.x=(b-k)/a;this.y=(f-c)/a;this.z=(g-d)/a;this.w=Math.acos((e+h+m-1)/2);return this},min:function(a){this.x=Math.min(this.x,a.x);this.y=Math.min(this.y,a.y);this.z=Math.min(this.z,a.z);this.w=Math.min(this.w,a.w);return this},max:function(a){this.x=Math.max(this.x,a.x); -this.y=Math.max(this.y,a.y);this.z=Math.max(this.z,a.z);this.w=Math.max(this.w,a.w);return this},clamp:function(a,b){this.x=Math.max(a.x,Math.min(b.x,this.x));this.y=Math.max(a.y,Math.min(b.y,this.y));this.z=Math.max(a.z,Math.min(b.z,this.z));this.w=Math.max(a.w,Math.min(b.w,this.w));return this},clampScalar:function(){var a,b;return function(c,d){void 0===a&&(a=new ga,b=new ga);a.set(c,c,c,c);b.set(d,d,d,d);return this.clamp(a,b)}}(),floor:function(){this.x=Math.floor(this.x);this.y=Math.floor(this.y); -this.z=Math.floor(this.z);this.w=Math.floor(this.w);return this},ceil:function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);this.z=Math.ceil(this.z);this.w=Math.ceil(this.w);return this},round:function(){this.x=Math.round(this.x);this.y=Math.round(this.y);this.z=Math.round(this.z);this.w=Math.round(this.w);return this},roundToZero:function(){this.x=0>this.x?Math.ceil(this.x):Math.floor(this.x);this.y=0>this.y?Math.ceil(this.y):Math.floor(this.y);this.z=0>this.z?Math.ceil(this.z):Math.floor(this.z); -this.w=0>this.w?Math.ceil(this.w):Math.floor(this.w);return this},negate:function(){this.x=-this.x;this.y=-this.y;this.z=-this.z;this.w=-this.w;return this},dot:function(a){return this.x*a.x+this.y*a.y+this.z*a.z+this.w*a.w},lengthSq:function(){return this.x*this.x+this.y*this.y+this.z*this.z+this.w*this.w},length:function(){return Math.sqrt(this.x*this.x+this.y*this.y+this.z*this.z+this.w*this.w)},lengthManhattan:function(){return Math.abs(this.x)+Math.abs(this.y)+Math.abs(this.z)+Math.abs(this.w)}, -normalize:function(){return this.divideScalar(this.length())},setLength:function(a){return this.multiplyScalar(a/this.length())},lerp:function(a,b){this.x+=(a.x-this.x)*b;this.y+=(a.y-this.y)*b;this.z+=(a.z-this.z)*b;this.w+=(a.w-this.w)*b;return this},lerpVectors:function(a,b,c){return this.subVectors(b,a).multiplyScalar(c).add(a)},equals:function(a){return a.x===this.x&&a.y===this.y&&a.z===this.z&&a.w===this.w},fromArray:function(a,b){void 0===b&&(b=0);this.x=a[b];this.y=a[b+1];this.z=a[b+2];this.w= 
-a[b+3];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.x;a[b+1]=this.y;a[b+2]=this.z;a[b+3]=this.w;return a},fromAttribute:function(a,b,c){void 0===c&&(c=0);b=b*a.itemSize+c;this.x=a.array[b];this.y=a.array[b+1];this.z=a.array[b+2];this.w=a.array[b+3];return this}};Object.assign(Db.prototype,sa.prototype,{isWebGLRenderTarget:!0,setSize:function(a,b){if(this.width!==a||this.height!==b)this.width=a,this.height=b,this.dispose();this.viewport.set(0,0,a,b);this.scissor.set(0, -0,a,b)},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.width=a.width;this.height=a.height;this.viewport.copy(a.viewport);this.texture=a.texture.clone();this.depthBuffer=a.depthBuffer;this.stencilBuffer=a.stencilBuffer;this.depthTexture=a.depthTexture;return this},dispose:function(){this.dispatchEvent({type:"dispose"})}});Eb.prototype=Object.create(Db.prototype);Eb.prototype.constructor=Eb;Eb.prototype.isWebGLRenderTargetCube=!0;ba.prototype={constructor:ba,get x(){return this._x}, -set x(a){this._x=a;this.onChangeCallback()},get y(){return this._y},set y(a){this._y=a;this.onChangeCallback()},get z(){return this._z},set z(a){this._z=a;this.onChangeCallback()},get w(){return this._w},set w(a){this._w=a;this.onChangeCallback()},set:function(a,b,c,d){this._x=a;this._y=b;this._z=c;this._w=d;this.onChangeCallback();return this},clone:function(){return new this.constructor(this._x,this._y,this._z,this._w)},copy:function(a){this._x=a.x;this._y=a.y;this._z=a.z;this._w=a.w;this.onChangeCallback(); -return this},setFromEuler:function(a,b){if(!1===(a&&a.isEuler))throw Error("THREE.Quaternion: .setFromEuler() now expects an Euler rotation rather than a Vector3 and order.");var c=Math.cos(a._x/2),d=Math.cos(a._y/2),e=Math.cos(a._z/2),f=Math.sin(a._x/2),g=Math.sin(a._y/2),h=Math.sin(a._z/2),k=a.order;"XYZ"===k?(this._x=f*d*e+c*g*h,this._y=c*g*e-f*d*h,this._z=c*d*h+f*g*e,this._w=c*d*e-f*g*h):"YXZ"===k?(this._x=f*d*e+c*g*h,this._y=c*g*e-f*d*h,this._z=c*d*h-f*g*e,this._w=c*d*e+f*g*h):"ZXY"===k?(this._x= -f*d*e-c*g*h,this._y=c*g*e+f*d*h,this._z=c*d*h+f*g*e,this._w=c*d*e-f*g*h):"ZYX"===k?(this._x=f*d*e-c*g*h,this._y=c*g*e+f*d*h,this._z=c*d*h-f*g*e,this._w=c*d*e+f*g*h):"YZX"===k?(this._x=f*d*e+c*g*h,this._y=c*g*e+f*d*h,this._z=c*d*h-f*g*e,this._w=c*d*e-f*g*h):"XZY"===k&&(this._x=f*d*e-c*g*h,this._y=c*g*e-f*d*h,this._z=c*d*h+f*g*e,this._w=c*d*e+f*g*h);if(!1!==b)this.onChangeCallback();return this},setFromAxisAngle:function(a,b){var c=b/2,d=Math.sin(c);this._x=a.x*d;this._y=a.y*d;this._z=a.z*d;this._w= -Math.cos(c);this.onChangeCallback();return this},setFromRotationMatrix:function(a){var b=a.elements,c=b[0];a=b[4];var d=b[8],e=b[1],f=b[5],g=b[9],h=b[2],k=b[6],b=b[10],m=c+f+b;0f&&c>b?(c=2*Math.sqrt(1+c-f-b),this._w=(k-g)/c,this._x=.25*c,this._y=(a+e)/c,this._z=(d+h)/c):f>b?(c=2*Math.sqrt(1+f-c-b),this._w=(d-h)/c,this._x=(a+e)/c,this._y=.25*c,this._z=(g+k)/c):(c=2*Math.sqrt(1+b-c-f),this._w=(e-a)/c,this._x=(d+ -h)/c,this._y=(g+k)/c,this._z=.25*c);this.onChangeCallback();return this},setFromUnitVectors:function(){var a,b;return function(c,d){void 0===a&&(a=new q);b=c.dot(d)+1;1E-6>b?(b=0,Math.abs(c.x)>Math.abs(c.z)?a.set(-c.y,c.x,0):a.set(0,-c.z,c.y)):a.crossVectors(c,d);this._x=a.x;this._y=a.y;this._z=a.z;this._w=b;return this.normalize()}}(),inverse:function(){return this.conjugate().normalize()},conjugate:function(){this._x*=-1;this._y*=-1;this._z*=-1;this.onChangeCallback();return this},dot:function(a){return this._x* 
-a._x+this._y*a._y+this._z*a._z+this._w*a._w},lengthSq:function(){return this._x*this._x+this._y*this._y+this._z*this._z+this._w*this._w},length:function(){return Math.sqrt(this._x*this._x+this._y*this._y+this._z*this._z+this._w*this._w)},normalize:function(){var a=this.length();0===a?(this._z=this._y=this._x=0,this._w=1):(a=1/a,this._x*=a,this._y*=a,this._z*=a,this._w*=a);this.onChangeCallback();return this},multiply:function(a,b){return void 0!==b?(console.warn("THREE.Quaternion: .multiply() now only accepts one argument. Use .multiplyQuaternions( a, b ) instead."), -this.multiplyQuaternions(a,b)):this.multiplyQuaternions(this,a)},premultiply:function(a){return this.multiplyQuaternions(a,this)},multiplyQuaternions:function(a,b){var c=a._x,d=a._y,e=a._z,f=a._w,g=b._x,h=b._y,k=b._z,m=b._w;this._x=c*m+f*g+d*k-e*h;this._y=d*m+f*h+e*g-c*k;this._z=e*m+f*k+c*h-d*g;this._w=f*m-c*g-d*h-e*k;this.onChangeCallback();return this},slerp:function(a,b){if(0===b)return this;if(1===b)return this.copy(a);var c=this._x,d=this._y,e=this._z,f=this._w,g=f*a._w+c*a._x+d*a._y+e*a._z; -0>g?(this._w=-a._w,this._x=-a._x,this._y=-a._y,this._z=-a._z,g=-g):this.copy(a);if(1<=g)return this._w=f,this._x=c,this._y=d,this._z=e,this;var h=Math.sqrt(1-g*g);if(.001>Math.abs(h))return this._w=.5*(f+this._w),this._x=.5*(c+this._x),this._y=.5*(d+this._y),this._z=.5*(e+this._z),this;var k=Math.atan2(h,g),g=Math.sin((1-b)*k)/h,h=Math.sin(b*k)/h;this._w=f*g+this._w*h;this._x=c*g+this._x*h;this._y=d*g+this._y*h;this._z=e*g+this._z*h;this.onChangeCallback();return this},equals:function(a){return a._x=== -this._x&&a._y===this._y&&a._z===this._z&&a._w===this._w},fromArray:function(a,b){void 0===b&&(b=0);this._x=a[b];this._y=a[b+1];this._z=a[b+2];this._w=a[b+3];this.onChangeCallback();return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this._x;a[b+1]=this._y;a[b+2]=this._z;a[b+3]=this._w;return a},onChange:function(a){this.onChangeCallback=a;return this},onChangeCallback:function(){}};Object.assign(ba,{slerp:function(a,b,c,d){return c.copy(a).slerp(b,d)},slerpFlat:function(a, -b,c,d,e,f,g){var h=c[d+0],k=c[d+1],m=c[d+2];c=c[d+3];d=e[f+0];var l=e[f+1],n=e[f+2];e=e[f+3];if(c!==e||h!==d||k!==l||m!==n){f=1-g;var p=h*d+k*l+m*n+c*e,r=0<=p?1:-1,x=1-p*p;x>Number.EPSILON&&(x=Math.sqrt(x),p=Math.atan2(x,p*r),f=Math.sin(f*p)/x,g=Math.sin(g*p)/x);r*=g;h=h*f+d*r;k=k*f+l*r;m=m*f+n*r;c=c*f+e*r;f===1-g&&(g=1/Math.sqrt(h*h+k*k+m*m+c*c),h*=g,k*=g,m*=g,c*=g)}a[b]=h;a[b+1]=k;a[b+2]=m;a[b+3]=c}});q.prototype={constructor:q,isVector3:!0,set:function(a,b,c){this.x=a;this.y=b;this.z=c;return this}, -setScalar:function(a){this.z=this.y=this.x=a;return this},setX:function(a){this.x=a;return this},setY:function(a){this.y=a;return this},setZ:function(a){this.z=a;return this},setComponent:function(a,b){switch(a){case 0:this.x=b;break;case 1:this.y=b;break;case 2:this.z=b;break;default:throw Error("index is out of range: "+a);}return this},getComponent:function(a){switch(a){case 0:return this.x;case 1:return this.y;case 2:return this.z;default:throw Error("index is out of range: "+a);}},clone:function(){return new this.constructor(this.x, -this.y,this.z)},copy:function(a){this.x=a.x;this.y=a.y;this.z=a.z;return this},add:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .add() now only accepts one argument. 
Use .addVectors( a, b ) instead."),this.addVectors(a,b);this.x+=a.x;this.y+=a.y;this.z+=a.z;return this},addScalar:function(a){this.x+=a;this.y+=a;this.z+=a;return this},addVectors:function(a,b){this.x=a.x+b.x;this.y=a.y+b.y;this.z=a.z+b.z;return this},addScaledVector:function(a,b){this.x+=a.x*b;this.y+=a.y*b;this.z+= -a.z*b;return this},sub:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .sub() now only accepts one argument. Use .subVectors( a, b ) instead."),this.subVectors(a,b);this.x-=a.x;this.y-=a.y;this.z-=a.z;return this},subScalar:function(a){this.x-=a;this.y-=a;this.z-=a;return this},subVectors:function(a,b){this.x=a.x-b.x;this.y=a.y-b.y;this.z=a.z-b.z;return this},multiply:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .multiply() now only accepts one argument. Use .multiplyVectors( a, b ) instead."), -this.multiplyVectors(a,b);this.x*=a.x;this.y*=a.y;this.z*=a.z;return this},multiplyScalar:function(a){isFinite(a)?(this.x*=a,this.y*=a,this.z*=a):this.z=this.y=this.x=0;return this},multiplyVectors:function(a,b){this.x=a.x*b.x;this.y=a.y*b.y;this.z=a.z*b.z;return this},applyEuler:function(){var a;return function(b){!1===(b&&b.isEuler)&&console.error("THREE.Vector3: .applyEuler() now expects an Euler rotation rather than a Vector3 and order.");void 0===a&&(a=new ba);return this.applyQuaternion(a.setFromEuler(b))}}(), -applyAxisAngle:function(){var a;return function(b,c){void 0===a&&(a=new ba);return this.applyQuaternion(a.setFromAxisAngle(b,c))}}(),applyMatrix3:function(a){var b=this.x,c=this.y,d=this.z;a=a.elements;this.x=a[0]*b+a[3]*c+a[6]*d;this.y=a[1]*b+a[4]*c+a[7]*d;this.z=a[2]*b+a[5]*c+a[8]*d;return this},applyMatrix4:function(a){var b=this.x,c=this.y,d=this.z;a=a.elements;this.x=a[0]*b+a[4]*c+a[8]*d+a[12];this.y=a[1]*b+a[5]*c+a[9]*d+a[13];this.z=a[2]*b+a[6]*c+a[10]*d+a[14];return this},applyProjection:function(a){var b= -this.x,c=this.y,d=this.z;a=a.elements;var e=1/(a[3]*b+a[7]*c+a[11]*d+a[15]);this.x=(a[0]*b+a[4]*c+a[8]*d+a[12])*e;this.y=(a[1]*b+a[5]*c+a[9]*d+a[13])*e;this.z=(a[2]*b+a[6]*c+a[10]*d+a[14])*e;return this},applyQuaternion:function(a){var b=this.x,c=this.y,d=this.z,e=a.x,f=a.y,g=a.z;a=a.w;var h=a*b+f*d-g*c,k=a*c+g*b-e*d,m=a*d+e*c-f*b,b=-e*b-f*c-g*d;this.x=h*a+b*-e+k*-g-m*-f;this.y=k*a+b*-f+m*-e-h*-g;this.z=m*a+b*-g+h*-f-k*-e;return this},project:function(){var a;return function(b){void 0===a&&(a=new J); -a.multiplyMatrices(b.projectionMatrix,a.getInverse(b.matrixWorld));return this.applyProjection(a)}}(),unproject:function(){var a;return function(b){void 0===a&&(a=new J);a.multiplyMatrices(b.matrixWorld,a.getInverse(b.projectionMatrix));return this.applyProjection(a)}}(),transformDirection:function(a){var b=this.x,c=this.y,d=this.z;a=a.elements;this.x=a[0]*b+a[4]*c+a[8]*d;this.y=a[1]*b+a[5]*c+a[9]*d;this.z=a[2]*b+a[6]*c+a[10]*d;return this.normalize()},divide:function(a){this.x/=a.x;this.y/=a.y;this.z/= -a.z;return this},divideScalar:function(a){return this.multiplyScalar(1/a)},min:function(a){this.x=Math.min(this.x,a.x);this.y=Math.min(this.y,a.y);this.z=Math.min(this.z,a.z);return this},max:function(a){this.x=Math.max(this.x,a.x);this.y=Math.max(this.y,a.y);this.z=Math.max(this.z,a.z);return this},clamp:function(a,b){this.x=Math.max(a.x,Math.min(b.x,this.x));this.y=Math.max(a.y,Math.min(b.y,this.y));this.z=Math.max(a.z,Math.min(b.z,this.z));return this},clampScalar:function(){var a,b;return function(c, -d){void 0===a&&(a=new q,b=new q);a.set(c,c,c);b.set(d,d,d);return this.clamp(a,b)}}(),clampLength:function(a,b){var 
c=this.length();return this.multiplyScalar(Math.max(a,Math.min(b,c))/c)},floor:function(){this.x=Math.floor(this.x);this.y=Math.floor(this.y);this.z=Math.floor(this.z);return this},ceil:function(){this.x=Math.ceil(this.x);this.y=Math.ceil(this.y);this.z=Math.ceil(this.z);return this},round:function(){this.x=Math.round(this.x);this.y=Math.round(this.y);this.z=Math.round(this.z);return this}, -roundToZero:function(){this.x=0>this.x?Math.ceil(this.x):Math.floor(this.x);this.y=0>this.y?Math.ceil(this.y):Math.floor(this.y);this.z=0>this.z?Math.ceil(this.z):Math.floor(this.z);return this},negate:function(){this.x=-this.x;this.y=-this.y;this.z=-this.z;return this},dot:function(a){return this.x*a.x+this.y*a.y+this.z*a.z},lengthSq:function(){return this.x*this.x+this.y*this.y+this.z*this.z},length:function(){return Math.sqrt(this.x*this.x+this.y*this.y+this.z*this.z)},lengthManhattan:function(){return Math.abs(this.x)+ -Math.abs(this.y)+Math.abs(this.z)},normalize:function(){return this.divideScalar(this.length())},setLength:function(a){return this.multiplyScalar(a/this.length())},lerp:function(a,b){this.x+=(a.x-this.x)*b;this.y+=(a.y-this.y)*b;this.z+=(a.z-this.z)*b;return this},lerpVectors:function(a,b,c){return this.subVectors(b,a).multiplyScalar(c).add(a)},cross:function(a,b){if(void 0!==b)return console.warn("THREE.Vector3: .cross() now only accepts one argument. Use .crossVectors( a, b ) instead."),this.crossVectors(a, -b);var c=this.x,d=this.y,e=this.z;this.x=d*a.z-e*a.y;this.y=e*a.x-c*a.z;this.z=c*a.y-d*a.x;return this},crossVectors:function(a,b){var c=a.x,d=a.y,e=a.z,f=b.x,g=b.y,h=b.z;this.x=d*h-e*g;this.y=e*f-c*h;this.z=c*g-d*f;return this},projectOnVector:function(a){var b=a.dot(this)/a.lengthSq();return this.copy(a).multiplyScalar(b)},projectOnPlane:function(){var a;return function(b){void 0===a&&(a=new q);a.copy(this).projectOnVector(b);return this.sub(a)}}(),reflect:function(){var a;return function(b){void 0=== -a&&(a=new q);return this.sub(a.copy(b).multiplyScalar(2*this.dot(b)))}}(),angleTo:function(a){a=this.dot(a)/Math.sqrt(this.lengthSq()*a.lengthSq());return Math.acos(T.clamp(a,-1,1))},distanceTo:function(a){return Math.sqrt(this.distanceToSquared(a))},distanceToSquared:function(a){var b=this.x-a.x,c=this.y-a.y;a=this.z-a.z;return b*b+c*c+a*a},distanceToManhattan:function(a){return Math.abs(this.x-a.x)+Math.abs(this.y-a.y)+Math.abs(this.z-a.z)},setFromSpherical:function(a){var b=Math.sin(a.phi)*a.radius; -this.x=b*Math.sin(a.theta);this.y=Math.cos(a.phi)*a.radius;this.z=b*Math.cos(a.theta);return this},setFromMatrixPosition:function(a){return this.setFromMatrixColumn(a,3)},setFromMatrixScale:function(a){var b=this.setFromMatrixColumn(a,0).length(),c=this.setFromMatrixColumn(a,1).length();a=this.setFromMatrixColumn(a,2).length();this.x=b;this.y=c;this.z=a;return this},setFromMatrixColumn:function(a,b){if("number"===typeof a){console.warn("THREE.Vector3: setFromMatrixColumn now expects ( matrix, index )."); -var c=a;a=b;b=c}return this.fromArray(a.elements,4*b)},equals:function(a){return a.x===this.x&&a.y===this.y&&a.z===this.z},fromArray:function(a,b){void 0===b&&(b=0);this.x=a[b];this.y=a[b+1];this.z=a[b+2];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this.x;a[b+1]=this.y;a[b+2]=this.z;return a},fromAttribute:function(a,b,c){void 0===c&&(c=0);b=b*a.itemSize+c;this.x=a.array[b];this.y=a.array[b+1];this.z=a.array[b+2];return this}};J.prototype={constructor:J,isMatrix4:!0, -set:function(a,b,c,d,e,f,g,h,k,m,l,n,p,r,x,t){var 
q=this.elements;q[0]=a;q[4]=b;q[8]=c;q[12]=d;q[1]=e;q[5]=f;q[9]=g;q[13]=h;q[2]=k;q[6]=m;q[10]=l;q[14]=n;q[3]=p;q[7]=r;q[11]=x;q[15]=t;return this},identity:function(){this.set(1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1);return this},clone:function(){return(new J).fromArray(this.elements)},copy:function(a){this.elements.set(a.elements);return this},copyPosition:function(a){var b=this.elements;a=a.elements;b[12]=a[12];b[13]=a[13];b[14]=a[14];return this},extractBasis:function(a, -b,c){a.setFromMatrixColumn(this,0);b.setFromMatrixColumn(this,1);c.setFromMatrixColumn(this,2);return this},makeBasis:function(a,b,c){this.set(a.x,b.x,c.x,0,a.y,b.y,c.y,0,a.z,b.z,c.z,0,0,0,0,1);return this},extractRotation:function(){var a;return function(b){void 0===a&&(a=new q);var c=this.elements,d=b.elements,e=1/a.setFromMatrixColumn(b,0).length(),f=1/a.setFromMatrixColumn(b,1).length();b=1/a.setFromMatrixColumn(b,2).length();c[0]=d[0]*e;c[1]=d[1]*e;c[2]=d[2]*e;c[4]=d[4]*f;c[5]=d[5]*f;c[6]=d[6]* -f;c[8]=d[8]*b;c[9]=d[9]*b;c[10]=d[10]*b;return this}}(),makeRotationFromEuler:function(a){!1===(a&&a.isEuler)&&console.error("THREE.Matrix: .makeRotationFromEuler() now expects a Euler rotation rather than a Vector3 and order.");var b=this.elements,c=a.x,d=a.y,e=a.z,f=Math.cos(c),c=Math.sin(c),g=Math.cos(d),d=Math.sin(d),h=Math.cos(e),e=Math.sin(e);if("XYZ"===a.order){a=f*h;var k=f*e,m=c*h,l=c*e;b[0]=g*h;b[4]=-g*e;b[8]=d;b[1]=k+m*d;b[5]=a-l*d;b[9]=-c*g;b[2]=l-a*d;b[6]=m+k*d;b[10]=f*g}else"YXZ"=== -a.order?(a=g*h,k=g*e,m=d*h,l=d*e,b[0]=a+l*c,b[4]=m*c-k,b[8]=f*d,b[1]=f*e,b[5]=f*h,b[9]=-c,b[2]=k*c-m,b[6]=l+a*c,b[10]=f*g):"ZXY"===a.order?(a=g*h,k=g*e,m=d*h,l=d*e,b[0]=a-l*c,b[4]=-f*e,b[8]=m+k*c,b[1]=k+m*c,b[5]=f*h,b[9]=l-a*c,b[2]=-f*d,b[6]=c,b[10]=f*g):"ZYX"===a.order?(a=f*h,k=f*e,m=c*h,l=c*e,b[0]=g*h,b[4]=m*d-k,b[8]=a*d+l,b[1]=g*e,b[5]=l*d+a,b[9]=k*d-m,b[2]=-d,b[6]=c*g,b[10]=f*g):"YZX"===a.order?(a=f*g,k=f*d,m=c*g,l=c*d,b[0]=g*h,b[4]=l-a*e,b[8]=m*e+k,b[1]=e,b[5]=f*h,b[9]=-c*h,b[2]=-d*h,b[6]=k* -e+m,b[10]=a-l*e):"XZY"===a.order&&(a=f*g,k=f*d,m=c*g,l=c*d,b[0]=g*h,b[4]=-e,b[8]=d*h,b[1]=a*e+l,b[5]=f*h,b[9]=k*e-m,b[2]=m*e-k,b[6]=c*h,b[10]=l*e+a);b[3]=0;b[7]=0;b[11]=0;b[12]=0;b[13]=0;b[14]=0;b[15]=1;return this},makeRotationFromQuaternion:function(a){var b=this.elements,c=a.x,d=a.y,e=a.z,f=a.w,g=c+c,h=d+d,k=e+e;a=c*g;var m=c*h,c=c*k,l=d*h,d=d*k,e=e*k,g=f*g,h=f*h,f=f*k;b[0]=1-(l+e);b[4]=m-f;b[8]=c+h;b[1]=m+f;b[5]=1-(a+e);b[9]=d-g;b[2]=c-h;b[6]=d+g;b[10]=1-(a+l);b[3]=0;b[7]=0;b[11]=0;b[12]=0;b[13]= -0;b[14]=0;b[15]=1;return this},lookAt:function(){var a,b,c;return function(d,e,f){void 0===a&&(a=new q,b=new q,c=new q);var g=this.elements;c.subVectors(d,e).normalize();0===c.lengthSq()&&(c.z=1);a.crossVectors(f,c).normalize();0===a.lengthSq()&&(c.z+=1E-4,a.crossVectors(f,c).normalize());b.crossVectors(c,a);g[0]=a.x;g[4]=b.x;g[8]=c.x;g[1]=a.y;g[5]=b.y;g[9]=c.y;g[2]=a.z;g[6]=b.z;g[10]=c.z;return this}}(),multiply:function(a,b){return void 0!==b?(console.warn("THREE.Matrix4: .multiply() now only accepts one argument. 
Use .multiplyMatrices( a, b ) instead."), -this.multiplyMatrices(a,b)):this.multiplyMatrices(this,a)},premultiply:function(a){return this.multiplyMatrices(a,this)},multiplyMatrices:function(a,b){var c=a.elements,d=b.elements,e=this.elements,f=c[0],g=c[4],h=c[8],k=c[12],m=c[1],l=c[5],n=c[9],p=c[13],r=c[2],x=c[6],t=c[10],q=c[14],u=c[3],v=c[7],I=c[11],c=c[15],y=d[0],E=d[4],H=d[8],F=d[12],M=d[1],B=d[5],K=d[9],z=d[13],C=d[2],G=d[6],J=d[10],N=d[14],P=d[3],R=d[7],S=d[11],d=d[15];e[0]=f*y+g*M+h*C+k*P;e[4]=f*E+g*B+h*G+k*R;e[8]=f*H+g*K+h*J+k*S;e[12]= -f*F+g*z+h*N+k*d;e[1]=m*y+l*M+n*C+p*P;e[5]=m*E+l*B+n*G+p*R;e[9]=m*H+l*K+n*J+p*S;e[13]=m*F+l*z+n*N+p*d;e[2]=r*y+x*M+t*C+q*P;e[6]=r*E+x*B+t*G+q*R;e[10]=r*H+x*K+t*J+q*S;e[14]=r*F+x*z+t*N+q*d;e[3]=u*y+v*M+I*C+c*P;e[7]=u*E+v*B+I*G+c*R;e[11]=u*H+v*K+I*J+c*S;e[15]=u*F+v*z+I*N+c*d;return this},multiplyToArray:function(a,b,c){var d=this.elements;this.multiplyMatrices(a,b);c[0]=d[0];c[1]=d[1];c[2]=d[2];c[3]=d[3];c[4]=d[4];c[5]=d[5];c[6]=d[6];c[7]=d[7];c[8]=d[8];c[9]=d[9];c[10]=d[10];c[11]=d[11];c[12]=d[12]; -c[13]=d[13];c[14]=d[14];c[15]=d[15];return this},multiplyScalar:function(a){var b=this.elements;b[0]*=a;b[4]*=a;b[8]*=a;b[12]*=a;b[1]*=a;b[5]*=a;b[9]*=a;b[13]*=a;b[2]*=a;b[6]*=a;b[10]*=a;b[14]*=a;b[3]*=a;b[7]*=a;b[11]*=a;b[15]*=a;return this},applyToVector3Array:function(){var a;return function(b,c,d){void 0===a&&(a=new q);void 0===c&&(c=0);void 0===d&&(d=b.length);for(var e=0;ethis.determinant()&&(g=-g);c.x=f[12];c.y=f[13];c.z=f[14];b.elements.set(this.elements);c=1/g;var f=1/h,m=1/k;b.elements[0]*=c;b.elements[1]*=c;b.elements[2]*=c;b.elements[4]*=f;b.elements[5]*=f;b.elements[6]*=f;b.elements[8]*=m;b.elements[9]*=m;b.elements[10]*=m;d.setFromRotationMatrix(b);e.x=g;e.y=h;e.z=k;return this}}(),makeFrustum:function(a,b,c,d,e,f){var g=this.elements;g[0]=2*e/(b-a);g[4]=0;g[8]=(b+a)/(b-a);g[12]=0;g[1]=0;g[5]=2*e/(d-c); -g[9]=(d+c)/(d-c);g[13]=0;g[2]=0;g[6]=0;g[10]=-(f+e)/(f-e);g[14]=-2*f*e/(f-e);g[3]=0;g[7]=0;g[11]=-1;g[15]=0;return this},makePerspective:function(a,b,c,d){a=c*Math.tan(T.DEG2RAD*a*.5);var e=-a;return this.makeFrustum(e*b,a*b,e,a,c,d)},makeOrthographic:function(a,b,c,d,e,f){var g=this.elements,h=1/(b-a),k=1/(c-d),m=1/(f-e);g[0]=2*h;g[4]=0;g[8]=0;g[12]=-((b+a)*h);g[1]=0;g[5]=2*k;g[9]=0;g[13]=-((c+d)*k);g[2]=0;g[6]=0;g[10]=-2*m;g[14]=-((f+e)*m);g[3]=0;g[7]=0;g[11]=0;g[15]=1;return this},equals:function(a){var b= -this.elements;a=a.elements;for(var c=0;16>c;c++)if(b[c]!==a[c])return!1;return!0},fromArray:function(a,b){void 0===b&&(b=0);for(var c=0;16>c;c++)this.elements[c]=a[c+b];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);var c=this.elements;a[b]=c[0];a[b+1]=c[1];a[b+2]=c[2];a[b+3]=c[3];a[b+4]=c[4];a[b+5]=c[5];a[b+6]=c[6];a[b+7]=c[7];a[b+8]=c[8];a[b+9]=c[9];a[b+10]=c[10];a[b+11]=c[11];a[b+12]=c[12];a[b+13]=c[13];a[b+14]=c[14];a[b+15]=c[15];return a}};Xa.prototype=Object.create(da.prototype); -Xa.prototype.constructor=Xa;Xa.prototype.isCubeTexture=!0;Object.defineProperty(Xa.prototype,"images",{get:function(){return this.image},set:function(a){this.image=a}});var ie=new da,je=new Xa,fe=[],he=[];ne.prototype.setValue=function(a,b){for(var c=this.seq,d=0,e=c.length;d!==e;++d){var f=c[d];f.setValue(a,b[f.id])}};var zd=/([\w\d_]+)(\])?(\[|\.)?/g;Ya.prototype.setValue=function(a,b,c){b=this.map[b];void 0!==b&&b.setValue(a,c,this.renderer)};Ya.prototype.set=function(a,b,c){var d=this.map[c]; -void 0!==d&&d.setValue(a,b[c],this.renderer)};Ya.prototype.setOptional=function(a,b,c){b=b[c];void 
0!==b&&this.setValue(a,c,b)};Ya.upload=function(a,b,c,d){for(var e=0,f=b.length;e!==f;++e){var g=b[e],h=c[g.id];!1!==h.needsUpdate&&g.setValue(a,h.value,d)}};Ya.seqWithValue=function(a,b){for(var c=[],d=0,e=a.length;d!==e;++d){var f=a[d];f.id in b&&c.push(f)}return c};var La={merge:function(a){for(var b={},c=0;c 0.0 ) {\n#if defined ( PHYSICALLY_CORRECT_LIGHTS )\n\t\t\tfloat distanceFalloff = 1.0 / max( pow( lightDistance, decayExponent ), 0.01 );\n\t\t\tfloat maxDistanceCutoffFactor = pow2( saturate( 1.0 - pow4( lightDistance / cutoffDistance ) ) );\n\t\t\treturn distanceFalloff * maxDistanceCutoffFactor;\n#else\n\t\t\treturn pow( saturate( -lightDistance / cutoffDistance + 1.0 ), decayExponent );\n#endif\n\t\t}\n\t\treturn 1.0;\n}\nvec3 BRDF_Diffuse_Lambert( const in vec3 diffuseColor ) {\n\treturn RECIPROCAL_PI * diffuseColor;\n}\nvec3 F_Schlick( const in vec3 specularColor, const in float dotLH ) {\n\tfloat fresnel = exp2( ( -5.55473 * dotLH - 6.98316 ) * dotLH );\n\treturn ( 1.0 - specularColor ) * fresnel + specularColor;\n}\nfloat G_GGX_Smith( const in float alpha, const in float dotNL, const in float dotNV ) {\n\tfloat a2 = pow2( alpha );\n\tfloat gl = dotNL + sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNL ) );\n\tfloat gv = dotNV + sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNV ) );\n\treturn 1.0 / ( gl * gv );\n}\nfloat G_GGX_SmithCorrelated( const in float alpha, const in float dotNL, const in float dotNV ) {\n\tfloat a2 = pow2( alpha );\n\tfloat gv = dotNL * sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNV ) );\n\tfloat gl = dotNV * sqrt( a2 + ( 1.0 - a2 ) * pow2( dotNL ) );\n\treturn 0.5 / max( gv + gl, EPSILON );\n}\nfloat D_GGX( const in float alpha, const in float dotNH ) {\n\tfloat a2 = pow2( alpha );\n\tfloat denom = pow2( dotNH ) * ( a2 - 1.0 ) + 1.0;\n\treturn RECIPROCAL_PI * a2 / pow2( denom );\n}\nvec3 BRDF_Specular_GGX( const in IncidentLight incidentLight, const in GeometricContext geometry, const in vec3 specularColor, const in float roughness ) {\n\tfloat alpha = pow2( roughness );\n\tvec3 halfDir = normalize( incidentLight.direction + geometry.viewDir );\n\tfloat dotNL = saturate( dot( geometry.normal, incidentLight.direction ) );\n\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\tfloat dotNH = saturate( dot( geometry.normal, halfDir ) );\n\tfloat dotLH = saturate( dot( incidentLight.direction, halfDir ) );\n\tvec3 F = F_Schlick( specularColor, dotLH );\n\tfloat G = G_GGX_SmithCorrelated( alpha, dotNL, dotNV );\n\tfloat D = D_GGX( alpha, dotNH );\n\treturn F * ( G * D );\n}\nvec3 BRDF_Specular_GGX_Environment( const in GeometricContext geometry, const in vec3 specularColor, const in float roughness ) {\n\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\tconst vec4 c0 = vec4( - 1, - 0.0275, - 0.572, 0.022 );\n\tconst vec4 c1 = vec4( 1, 0.0425, 1.04, - 0.04 );\n\tvec4 r = roughness * c0 + c1;\n\tfloat a004 = min( r.x * r.x, exp2( - 9.28 * dotNV ) ) * r.x + r.y;\n\tvec2 AB = vec2( -1.04, 1.04 ) * a004 + r.zw;\n\treturn specularColor * AB.x + AB.y;\n}\nfloat G_BlinnPhong_Implicit( ) {\n\treturn 0.25;\n}\nfloat D_BlinnPhong( const in float shininess, const in float dotNH ) {\n\treturn RECIPROCAL_PI * ( shininess * 0.5 + 1.0 ) * pow( dotNH, shininess );\n}\nvec3 BRDF_Specular_BlinnPhong( const in IncidentLight incidentLight, const in GeometricContext geometry, const in vec3 specularColor, const in float shininess ) {\n\tvec3 halfDir = normalize( incidentLight.direction + geometry.viewDir );\n\tfloat dotNH = saturate( dot( geometry.normal, halfDir ) 
);\n\tfloat dotLH = saturate( dot( incidentLight.direction, halfDir ) );\n\tvec3 F = F_Schlick( specularColor, dotLH );\n\tfloat G = G_BlinnPhong_Implicit( );\n\tfloat D = D_BlinnPhong( shininess, dotNH );\n\treturn F * ( G * D );\n}\nfloat GGXRoughnessToBlinnExponent( const in float ggxRoughness ) {\n\treturn ( 2.0 / pow2( ggxRoughness + 0.0001 ) - 2.0 );\n}\nfloat BlinnExponentToGGXRoughness( const in float blinnExponent ) {\n\treturn sqrt( 2.0 / ( blinnExponent + 2.0 ) );\n}\n", -bumpmap_pars_fragment:"#ifdef USE_BUMPMAP\n\tuniform sampler2D bumpMap;\n\tuniform float bumpScale;\n\tvec2 dHdxy_fwd() {\n\t\tvec2 dSTdx = dFdx( vUv );\n\t\tvec2 dSTdy = dFdy( vUv );\n\t\tfloat Hll = bumpScale * texture2D( bumpMap, vUv ).x;\n\t\tfloat dBx = bumpScale * texture2D( bumpMap, vUv + dSTdx ).x - Hll;\n\t\tfloat dBy = bumpScale * texture2D( bumpMap, vUv + dSTdy ).x - Hll;\n\t\treturn vec2( dBx, dBy );\n\t}\n\tvec3 perturbNormalArb( vec3 surf_pos, vec3 surf_norm, vec2 dHdxy ) {\n\t\tvec3 vSigmaX = dFdx( surf_pos );\n\t\tvec3 vSigmaY = dFdy( surf_pos );\n\t\tvec3 vN = surf_norm;\n\t\tvec3 R1 = cross( vSigmaY, vN );\n\t\tvec3 R2 = cross( vN, vSigmaX );\n\t\tfloat fDet = dot( vSigmaX, R1 );\n\t\tvec3 vGrad = sign( fDet ) * ( dHdxy.x * R1 + dHdxy.y * R2 );\n\t\treturn normalize( abs( fDet ) * surf_norm - vGrad );\n\t}\n#endif\n", -clipping_planes_fragment:"#if NUM_CLIPPING_PLANES > 0\n\tfor ( int i = 0; i < UNION_CLIPPING_PLANES; ++ i ) {\n\t\tvec4 plane = clippingPlanes[ i ];\n\t\tif ( dot( vViewPosition, plane.xyz ) > plane.w ) discard;\n\t}\n\t\t\n\t#if UNION_CLIPPING_PLANES < NUM_CLIPPING_PLANES\n\t\tbool clipped = true;\n\t\tfor ( int i = UNION_CLIPPING_PLANES; i < NUM_CLIPPING_PLANES; ++ i ) {\n\t\t\tvec4 plane = clippingPlanes[ i ];\n\t\t\tclipped = ( dot( vViewPosition, plane.xyz ) > plane.w ) && clipped;\n\t\t}\n\t\tif ( clipped ) discard;\n\t\n\t#endif\n#endif\n", -clipping_planes_pars_fragment:"#if NUM_CLIPPING_PLANES > 0\n\t#if ! defined( PHYSICAL ) && ! defined( PHONG )\n\t\tvarying vec3 vViewPosition;\n\t#endif\n\tuniform vec4 clippingPlanes[ NUM_CLIPPING_PLANES ];\n#endif\n",clipping_planes_pars_vertex:"#if NUM_CLIPPING_PLANES > 0 && ! defined( PHYSICAL ) && ! defined( PHONG )\n\tvarying vec3 vViewPosition;\n#endif\n",clipping_planes_vertex:"#if NUM_CLIPPING_PLANES > 0 && ! defined( PHYSICAL ) && ! 
defined( PHONG )\n\tvViewPosition = - mvPosition.xyz;\n#endif\n", -color_fragment:"#ifdef USE_COLOR\n\tdiffuseColor.rgb *= vColor;\n#endif",color_pars_fragment:"#ifdef USE_COLOR\n\tvarying vec3 vColor;\n#endif\n",color_pars_vertex:"#ifdef USE_COLOR\n\tvarying vec3 vColor;\n#endif",color_vertex:"#ifdef USE_COLOR\n\tvColor.xyz = color.xyz;\n#endif",common:"#define PI 3.14159265359\n#define PI2 6.28318530718\n#define RECIPROCAL_PI 0.31830988618\n#define RECIPROCAL_PI2 0.15915494\n#define LOG2 1.442695\n#define EPSILON 1e-6\n#define saturate(a) clamp( a, 0.0, 1.0 )\n#define whiteCompliment(a) ( 1.0 - saturate( a ) )\nfloat pow2( const in float x ) { return x*x; }\nfloat pow3( const in float x ) { return x*x*x; }\nfloat pow4( const in float x ) { float x2 = x*x; return x2*x2; }\nfloat average( const in vec3 color ) { return dot( color, vec3( 0.3333 ) ); }\nhighp float rand( const in vec2 uv ) {\n\tconst highp float a = 12.9898, b = 78.233, c = 43758.5453;\n\thighp float dt = dot( uv.xy, vec2( a,b ) ), sn = mod( dt, PI );\n\treturn fract(sin(sn) * c);\n}\nstruct IncidentLight {\n\tvec3 color;\n\tvec3 direction;\n\tbool visible;\n};\nstruct ReflectedLight {\n\tvec3 directDiffuse;\n\tvec3 directSpecular;\n\tvec3 indirectDiffuse;\n\tvec3 indirectSpecular;\n};\nstruct GeometricContext {\n\tvec3 position;\n\tvec3 normal;\n\tvec3 viewDir;\n};\nvec3 transformDirection( in vec3 dir, in mat4 matrix ) {\n\treturn normalize( ( matrix * vec4( dir, 0.0 ) ).xyz );\n}\nvec3 inverseTransformDirection( in vec3 dir, in mat4 matrix ) {\n\treturn normalize( ( vec4( dir, 0.0 ) * matrix ).xyz );\n}\nvec3 projectOnPlane(in vec3 point, in vec3 pointOnPlane, in vec3 planeNormal ) {\n\tfloat distance = dot( planeNormal, point - pointOnPlane );\n\treturn - distance * planeNormal + point;\n}\nfloat sideOfPlane( in vec3 point, in vec3 pointOnPlane, in vec3 planeNormal ) {\n\treturn sign( dot( point - pointOnPlane, planeNormal ) );\n}\nvec3 linePlaneIntersect( in vec3 pointOnLine, in vec3 lineDirection, in vec3 pointOnPlane, in vec3 planeNormal ) {\n\treturn lineDirection * ( dot( planeNormal, pointOnPlane - pointOnLine ) / dot( planeNormal, lineDirection ) ) + pointOnLine;\n}\n", -cube_uv_reflection_fragment:"#ifdef ENVMAP_TYPE_CUBE_UV\n#define cubeUV_textureSize (1024.0)\nint getFaceFromDirection(vec3 direction) {\n\tvec3 absDirection = abs(direction);\n\tint face = -1;\n\tif( absDirection.x > absDirection.z ) {\n\t\tif(absDirection.x > absDirection.y )\n\t\t\tface = direction.x > 0.0 ? 0 : 3;\n\t\telse\n\t\t\tface = direction.y > 0.0 ? 1 : 4;\n\t}\n\telse {\n\t\tif(absDirection.z > absDirection.y )\n\t\t\tface = direction.z > 0.0 ? 2 : 5;\n\t\telse\n\t\t\tface = direction.y > 0.0 ? 
1 : 4;\n\t}\n\treturn face;\n}\n#define cubeUV_maxLods1 (log2(cubeUV_textureSize*0.25) - 1.0)\n#define cubeUV_rangeClamp (exp2((6.0 - 1.0) * 2.0))\nvec2 MipLevelInfo( vec3 vec, float roughnessLevel, float roughness ) {\n\tfloat scale = exp2(cubeUV_maxLods1 - roughnessLevel);\n\tfloat dxRoughness = dFdx(roughness);\n\tfloat dyRoughness = dFdy(roughness);\n\tvec3 dx = dFdx( vec * scale * dxRoughness );\n\tvec3 dy = dFdy( vec * scale * dyRoughness );\n\tfloat d = max( dot( dx, dx ), dot( dy, dy ) );\n\td = clamp(d, 1.0, cubeUV_rangeClamp);\n\tfloat mipLevel = 0.5 * log2(d);\n\treturn vec2(floor(mipLevel), fract(mipLevel));\n}\n#define cubeUV_maxLods2 (log2(cubeUV_textureSize*0.25) - 2.0)\n#define cubeUV_rcpTextureSize (1.0 / cubeUV_textureSize)\nvec2 getCubeUV(vec3 direction, float roughnessLevel, float mipLevel) {\n\tmipLevel = roughnessLevel > cubeUV_maxLods2 - 3.0 ? 0.0 : mipLevel;\n\tfloat a = 16.0 * cubeUV_rcpTextureSize;\n\tvec2 exp2_packed = exp2( vec2( roughnessLevel, mipLevel ) );\n\tvec2 rcp_exp2_packed = vec2( 1.0 ) / exp2_packed;\n\tfloat powScale = exp2_packed.x * exp2_packed.y;\n\tfloat scale = rcp_exp2_packed.x * rcp_exp2_packed.y * 0.25;\n\tfloat mipOffset = 0.75*(1.0 - rcp_exp2_packed.y) * rcp_exp2_packed.x;\n\tbool bRes = mipLevel == 0.0;\n\tscale = bRes && (scale < a) ? a : scale;\n\tvec3 r;\n\tvec2 offset;\n\tint face = getFaceFromDirection(direction);\n\tfloat rcpPowScale = 1.0 / powScale;\n\tif( face == 0) {\n\t\tr = vec3(direction.x, -direction.z, direction.y);\n\t\toffset = vec2(0.0+mipOffset,0.75 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;\n\t}\n\telse if( face == 1) {\n\t\tr = vec3(direction.y, direction.x, direction.z);\n\t\toffset = vec2(scale+mipOffset, 0.75 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;\n\t}\n\telse if( face == 2) {\n\t\tr = vec3(direction.z, direction.x, direction.y);\n\t\toffset = vec2(2.0*scale+mipOffset, 0.75 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;\n\t}\n\telse if( face == 3) {\n\t\tr = vec3(direction.x, direction.z, direction.y);\n\t\toffset = vec2(0.0+mipOffset,0.5 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;\n\t}\n\telse if( face == 4) {\n\t\tr = vec3(direction.y, direction.x, -direction.z);\n\t\toffset = vec2(scale+mipOffset, 0.5 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;\n\t}\n\telse {\n\t\tr = vec3(direction.z, -direction.x, direction.y);\n\t\toffset = vec2(2.0*scale+mipOffset, 0.5 * rcpPowScale);\n\t\toffset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;\n\t}\n\tr = normalize(r);\n\tfloat texelOffset = 0.5 * cubeUV_rcpTextureSize;\n\tvec2 s = ( r.yz / abs( r.x ) + vec2( 1.0 ) ) * 0.5;\n\tvec2 base = offset + vec2( texelOffset );\n\treturn base + s * ( scale - 2.0 * texelOffset );\n}\n#define cubeUV_maxLods3 (log2(cubeUV_textureSize*0.25) - 3.0)\nvec4 textureCubeUV(vec3 reflectedDirection, float roughness ) {\n\tfloat roughnessVal = roughness* cubeUV_maxLods3;\n\tfloat r1 = floor(roughnessVal);\n\tfloat r2 = r1 + 1.0;\n\tfloat t = fract(roughnessVal);\n\tvec2 mipInfo = MipLevelInfo(reflectedDirection, r1, roughness);\n\tfloat s = mipInfo.y;\n\tfloat level0 = mipInfo.x;\n\tfloat level1 = level0 + 1.0;\n\tlevel1 = level1 > 5.0 ? 
5.0 : level1;\n\tlevel0 += min( floor( s + 0.5 ), 5.0 );\n\tvec2 uv_10 = getCubeUV(reflectedDirection, r1, level0);\n\tvec4 color10 = envMapTexelToLinear(texture2D(envMap, uv_10));\n\tvec2 uv_20 = getCubeUV(reflectedDirection, r2, level0);\n\tvec4 color20 = envMapTexelToLinear(texture2D(envMap, uv_20));\n\tvec4 result = mix(color10, color20, t);\n\treturn vec4(result.rgb, 1.0);\n}\n#endif\n", -defaultnormal_vertex:"#ifdef FLIP_SIDED\n\tobjectNormal = -objectNormal;\n#endif\nvec3 transformedNormal = normalMatrix * objectNormal;\n",displacementmap_pars_vertex:"#ifdef USE_DISPLACEMENTMAP\n\tuniform sampler2D displacementMap;\n\tuniform float displacementScale;\n\tuniform float displacementBias;\n#endif\n",displacementmap_vertex:"#ifdef USE_DISPLACEMENTMAP\n\ttransformed += normal * ( texture2D( displacementMap, uv ).x * displacementScale + displacementBias );\n#endif\n",emissivemap_fragment:"#ifdef USE_EMISSIVEMAP\n\tvec4 emissiveColor = texture2D( emissiveMap, vUv );\n\temissiveColor.rgb = emissiveMapTexelToLinear( emissiveColor ).rgb;\n\ttotalEmissiveRadiance *= emissiveColor.rgb;\n#endif\n", -emissivemap_pars_fragment:"#ifdef USE_EMISSIVEMAP\n\tuniform sampler2D emissiveMap;\n#endif\n",encodings_fragment:" gl_FragColor = linearToOutputTexel( gl_FragColor );\n",encodings_pars_fragment:"\nvec4 LinearToLinear( in vec4 value ) {\n return value;\n}\nvec4 GammaToLinear( in vec4 value, in float gammaFactor ) {\n return vec4( pow( value.xyz, vec3( gammaFactor ) ), value.w );\n}\nvec4 LinearToGamma( in vec4 value, in float gammaFactor ) {\n return vec4( pow( value.xyz, vec3( 1.0 / gammaFactor ) ), value.w );\n}\nvec4 sRGBToLinear( in vec4 value ) {\n return vec4( mix( pow( value.rgb * 0.9478672986 + vec3( 0.0521327014 ), vec3( 2.4 ) ), value.rgb * 0.0773993808, vec3( lessThanEqual( value.rgb, vec3( 0.04045 ) ) ) ), value.w );\n}\nvec4 LinearTosRGB( in vec4 value ) {\n return vec4( mix( pow( value.rgb, vec3( 0.41666 ) ) * 1.055 - vec3( 0.055 ), value.rgb * 12.92, vec3( lessThanEqual( value.rgb, vec3( 0.0031308 ) ) ) ), value.w );\n}\nvec4 RGBEToLinear( in vec4 value ) {\n return vec4( value.rgb * exp2( value.a * 255.0 - 128.0 ), 1.0 );\n}\nvec4 LinearToRGBE( in vec4 value ) {\n float maxComponent = max( max( value.r, value.g ), value.b );\n float fExp = clamp( ceil( log2( maxComponent ) ), -128.0, 127.0 );\n return vec4( value.rgb / exp2( fExp ), ( fExp + 128.0 ) / 255.0 );\n}\nvec4 RGBMToLinear( in vec4 value, in float maxRange ) {\n return vec4( value.xyz * value.w * maxRange, 1.0 );\n}\nvec4 LinearToRGBM( in vec4 value, in float maxRange ) {\n float maxRGB = max( value.x, max( value.g, value.b ) );\n float M = clamp( maxRGB / maxRange, 0.0, 1.0 );\n M = ceil( M * 255.0 ) / 255.0;\n return vec4( value.rgb / ( M * maxRange ), M );\n}\nvec4 RGBDToLinear( in vec4 value, in float maxRange ) {\n return vec4( value.rgb * ( ( maxRange / 255.0 ) / value.a ), 1.0 );\n}\nvec4 LinearToRGBD( in vec4 value, in float maxRange ) {\n float maxRGB = max( value.x, max( value.g, value.b ) );\n float D = max( maxRange / maxRGB, 1.0 );\n D = min( floor( D ) / 255.0, 1.0 );\n return vec4( value.rgb * ( D * ( 255.0 / maxRange ) ), D );\n}\nconst mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );\nvec4 LinearToLogLuv( in vec4 value ) {\n vec3 Xp_Y_XYZp = value.rgb * cLogLuvM;\n Xp_Y_XYZp = max(Xp_Y_XYZp, vec3(1e-6, 1e-6, 1e-6));\n vec4 vResult;\n vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;\n float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;\n vResult.w = fract(Le);\n vResult.z = (Le - 
(floor(vResult.w*255.0))/255.0)/255.0;\n return vResult;\n}\nconst mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );\nvec4 LogLuvToLinear( in vec4 value ) {\n float Le = value.z * 255.0 + value.w;\n vec3 Xp_Y_XYZp;\n Xp_Y_XYZp.y = exp2((Le - 127.0) / 2.0);\n Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;\n Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;\n vec3 vRGB = Xp_Y_XYZp.rgb * cLogLuvInverseM;\n return vec4( max(vRGB, 0.0), 1.0 );\n}\n", -envmap_fragment:"#ifdef USE_ENVMAP\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )\n\t\tvec3 cameraToVertex = normalize( vWorldPosition - cameraPosition );\n\t\tvec3 worldNormal = inverseTransformDirection( normal, viewMatrix );\n\t\t#ifdef ENVMAP_MODE_REFLECTION\n\t\t\tvec3 reflectVec = reflect( cameraToVertex, worldNormal );\n\t\t#else\n\t\t\tvec3 reflectVec = refract( cameraToVertex, worldNormal, refractionRatio );\n\t\t#endif\n\t#else\n\t\tvec3 reflectVec = vReflect;\n\t#endif\n\t#ifdef ENVMAP_TYPE_CUBE\n\t\tvec4 envColor = textureCube( envMap, flipNormal * vec3( flipEnvMap * reflectVec.x, reflectVec.yz ) );\n\t#elif defined( ENVMAP_TYPE_EQUIREC )\n\t\tvec2 sampleUV;\n\t\tsampleUV.y = saturate( flipNormal * reflectVec.y * 0.5 + 0.5 );\n\t\tsampleUV.x = atan( flipNormal * reflectVec.z, flipNormal * reflectVec.x ) * RECIPROCAL_PI2 + 0.5;\n\t\tvec4 envColor = texture2D( envMap, sampleUV );\n\t#elif defined( ENVMAP_TYPE_SPHERE )\n\t\tvec3 reflectView = flipNormal * normalize( ( viewMatrix * vec4( reflectVec, 0.0 ) ).xyz + vec3( 0.0, 0.0, 1.0 ) );\n\t\tvec4 envColor = texture2D( envMap, reflectView.xy * 0.5 + 0.5 );\n\t#else\n\t\tvec4 envColor = vec4( 0.0 );\n\t#endif\n\tenvColor = envMapTexelToLinear( envColor );\n\t#ifdef ENVMAP_BLENDING_MULTIPLY\n\t\toutgoingLight = mix( outgoingLight, outgoingLight * envColor.xyz, specularStrength * reflectivity );\n\t#elif defined( ENVMAP_BLENDING_MIX )\n\t\toutgoingLight = mix( outgoingLight, envColor.xyz, specularStrength * reflectivity );\n\t#elif defined( ENVMAP_BLENDING_ADD )\n\t\toutgoingLight += envColor.xyz * specularStrength * reflectivity;\n\t#endif\n#endif\n", -envmap_pars_fragment:"#if defined( USE_ENVMAP ) || defined( PHYSICAL )\n\tuniform float reflectivity;\n\tuniform float envMapIntenstiy;\n#endif\n#ifdef USE_ENVMAP\n\t#if ! 
defined( PHYSICAL ) && ( defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG ) )\n\t\tvarying vec3 vWorldPosition;\n\t#endif\n\t#ifdef ENVMAP_TYPE_CUBE\n\t\tuniform samplerCube envMap;\n\t#else\n\t\tuniform sampler2D envMap;\n\t#endif\n\tuniform float flipEnvMap;\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG ) || defined( PHYSICAL )\n\t\tuniform float refractionRatio;\n\t#else\n\t\tvarying vec3 vReflect;\n\t#endif\n#endif\n", -envmap_pars_vertex:"#ifdef USE_ENVMAP\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )\n\t\tvarying vec3 vWorldPosition;\n\t#else\n\t\tvarying vec3 vReflect;\n\t\tuniform float refractionRatio;\n\t#endif\n#endif\n",envmap_vertex:"#ifdef USE_ENVMAP\n\t#if defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( PHONG )\n\t\tvWorldPosition = worldPosition.xyz;\n\t#else\n\t\tvec3 cameraToVertex = normalize( worldPosition.xyz - cameraPosition );\n\t\tvec3 worldNormal = inverseTransformDirection( transformedNormal, viewMatrix );\n\t\t#ifdef ENVMAP_MODE_REFLECTION\n\t\t\tvReflect = reflect( cameraToVertex, worldNormal );\n\t\t#else\n\t\t\tvReflect = refract( cameraToVertex, worldNormal, refractionRatio );\n\t\t#endif\n\t#endif\n#endif\n", -fog_fragment:"#ifdef USE_FOG\n\t#ifdef USE_LOGDEPTHBUF_EXT\n\t\tfloat depth = gl_FragDepthEXT / gl_FragCoord.w;\n\t#else\n\t\tfloat depth = gl_FragCoord.z / gl_FragCoord.w;\n\t#endif\n\t#ifdef FOG_EXP2\n\t\tfloat fogFactor = whiteCompliment( exp2( - fogDensity * fogDensity * depth * depth * LOG2 ) );\n\t#else\n\t\tfloat fogFactor = smoothstep( fogNear, fogFar, depth );\n\t#endif\n\tgl_FragColor.rgb = mix( gl_FragColor.rgb, fogColor, fogFactor );\n#endif\n",fog_pars_fragment:"#ifdef USE_FOG\n\tuniform vec3 fogColor;\n\t#ifdef FOG_EXP2\n\t\tuniform float fogDensity;\n\t#else\n\t\tuniform float fogNear;\n\t\tuniform float fogFar;\n\t#endif\n#endif", -lightmap_fragment:"#ifdef USE_LIGHTMAP\n\treflectedLight.indirectDiffuse += PI * texture2D( lightMap, vUv2 ).xyz * lightMapIntensity;\n#endif\n",lightmap_pars_fragment:"#ifdef USE_LIGHTMAP\n\tuniform sampler2D lightMap;\n\tuniform float lightMapIntensity;\n#endif",lights_lambert_vertex:"vec3 diffuse = vec3( 1.0 );\nGeometricContext geometry;\ngeometry.position = mvPosition.xyz;\ngeometry.normal = normalize( transformedNormal );\ngeometry.viewDir = normalize( -mvPosition.xyz );\nGeometricContext backGeometry;\nbackGeometry.position = geometry.position;\nbackGeometry.normal = -geometry.normal;\nbackGeometry.viewDir = geometry.viewDir;\nvLightFront = vec3( 0.0 );\n#ifdef DOUBLE_SIDED\n\tvLightBack = vec3( 0.0 );\n#endif\nIncidentLight directLight;\nfloat dotNL;\nvec3 directLightColor_Diffuse;\n#if NUM_POINT_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tgetPointDirectLightIrradiance( pointLights[ i ], geometry, directLight );\n\t\tdotNL = dot( geometry.normal, directLight.direction );\n\t\tdirectLightColor_Diffuse = PI * directLight.color;\n\t\tvLightFront += saturate( dotNL ) * directLightColor_Diffuse;\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += saturate( -dotNL ) * directLightColor_Diffuse;\n\t\t#endif\n\t}\n#endif\n#if NUM_SPOT_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tgetSpotDirectLightIrradiance( spotLights[ i ], geometry, directLight );\n\t\tdotNL = dot( geometry.normal, directLight.direction );\n\t\tdirectLightColor_Diffuse = PI * directLight.color;\n\t\tvLightFront += saturate( dotNL ) * directLightColor_Diffuse;\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += 
saturate( -dotNL ) * directLightColor_Diffuse;\n\t\t#endif\n\t}\n#endif\n#if NUM_DIR_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tgetDirectionalDirectLightIrradiance( directionalLights[ i ], geometry, directLight );\n\t\tdotNL = dot( geometry.normal, directLight.direction );\n\t\tdirectLightColor_Diffuse = PI * directLight.color;\n\t\tvLightFront += saturate( dotNL ) * directLightColor_Diffuse;\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += saturate( -dotNL ) * directLightColor_Diffuse;\n\t\t#endif\n\t}\n#endif\n#if NUM_HEMI_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_HEMI_LIGHTS; i ++ ) {\n\t\tvLightFront += getHemisphereLightIrradiance( hemisphereLights[ i ], geometry );\n\t\t#ifdef DOUBLE_SIDED\n\t\t\tvLightBack += getHemisphereLightIrradiance( hemisphereLights[ i ], backGeometry );\n\t\t#endif\n\t}\n#endif\n", -lights_pars:"uniform vec3 ambientLightColor;\nvec3 getAmbientLightIrradiance( const in vec3 ambientLightColor ) {\n\tvec3 irradiance = ambientLightColor;\n\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\tirradiance *= PI;\n\t#endif\n\treturn irradiance;\n}\n#if NUM_DIR_LIGHTS > 0\n\tstruct DirectionalLight {\n\t\tvec3 direction;\n\t\tvec3 color;\n\t\tint shadow;\n\t\tfloat shadowBias;\n\t\tfloat shadowRadius;\n\t\tvec2 shadowMapSize;\n\t};\n\tuniform DirectionalLight directionalLights[ NUM_DIR_LIGHTS ];\n\tvoid getDirectionalDirectLightIrradiance( const in DirectionalLight directionalLight, const in GeometricContext geometry, out IncidentLight directLight ) {\n\t\tdirectLight.color = directionalLight.color;\n\t\tdirectLight.direction = directionalLight.direction;\n\t\tdirectLight.visible = true;\n\t}\n#endif\n#if NUM_POINT_LIGHTS > 0\n\tstruct PointLight {\n\t\tvec3 position;\n\t\tvec3 color;\n\t\tfloat distance;\n\t\tfloat decay;\n\t\tint shadow;\n\t\tfloat shadowBias;\n\t\tfloat shadowRadius;\n\t\tvec2 shadowMapSize;\n\t};\n\tuniform PointLight pointLights[ NUM_POINT_LIGHTS ];\n\tvoid getPointDirectLightIrradiance( const in PointLight pointLight, const in GeometricContext geometry, out IncidentLight directLight ) {\n\t\tvec3 lVector = pointLight.position - geometry.position;\n\t\tdirectLight.direction = normalize( lVector );\n\t\tfloat lightDistance = length( lVector );\n\t\tif ( testLightInRange( lightDistance, pointLight.distance ) ) {\n\t\t\tdirectLight.color = pointLight.color;\n\t\t\tdirectLight.color *= punctualLightIntensityToIrradianceFactor( lightDistance, pointLight.distance, pointLight.decay );\n\t\t\tdirectLight.visible = true;\n\t\t} else {\n\t\t\tdirectLight.color = vec3( 0.0 );\n\t\t\tdirectLight.visible = false;\n\t\t}\n\t}\n#endif\n#if NUM_SPOT_LIGHTS > 0\n\tstruct SpotLight {\n\t\tvec3 position;\n\t\tvec3 direction;\n\t\tvec3 color;\n\t\tfloat distance;\n\t\tfloat decay;\n\t\tfloat coneCos;\n\t\tfloat penumbraCos;\n\t\tint shadow;\n\t\tfloat shadowBias;\n\t\tfloat shadowRadius;\n\t\tvec2 shadowMapSize;\n\t};\n\tuniform SpotLight spotLights[ NUM_SPOT_LIGHTS ];\n\tvoid getSpotDirectLightIrradiance( const in SpotLight spotLight, const in GeometricContext geometry, out IncidentLight directLight ) {\n\t\tvec3 lVector = spotLight.position - geometry.position;\n\t\tdirectLight.direction = normalize( lVector );\n\t\tfloat lightDistance = length( lVector );\n\t\tfloat angleCos = dot( directLight.direction, spotLight.direction );\n\t\tif ( all( bvec2( angleCos > spotLight.coneCos, testLightInRange( lightDistance, spotLight.distance ) ) ) ) {\n\t\t\tfloat spotEffect = smoothstep( spotLight.coneCos, spotLight.penumbraCos, angleCos );\n\t\t\tdirectLight.color = 
spotLight.color;\n\t\t\tdirectLight.color *= spotEffect * punctualLightIntensityToIrradianceFactor( lightDistance, spotLight.distance, spotLight.decay );\n\t\t\tdirectLight.visible = true;\n\t\t} else {\n\t\t\tdirectLight.color = vec3( 0.0 );\n\t\t\tdirectLight.visible = false;\n\t\t}\n\t}\n#endif\n#if NUM_HEMI_LIGHTS > 0\n\tstruct HemisphereLight {\n\t\tvec3 direction;\n\t\tvec3 skyColor;\n\t\tvec3 groundColor;\n\t};\n\tuniform HemisphereLight hemisphereLights[ NUM_HEMI_LIGHTS ];\n\tvec3 getHemisphereLightIrradiance( const in HemisphereLight hemiLight, const in GeometricContext geometry ) {\n\t\tfloat dotNL = dot( geometry.normal, hemiLight.direction );\n\t\tfloat hemiDiffuseWeight = 0.5 * dotNL + 0.5;\n\t\tvec3 irradiance = mix( hemiLight.groundColor, hemiLight.skyColor, hemiDiffuseWeight );\n\t\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\t\tirradiance *= PI;\n\t\t#endif\n\t\treturn irradiance;\n\t}\n#endif\n#if defined( USE_ENVMAP ) && defined( PHYSICAL )\n\tvec3 getLightProbeIndirectIrradiance( const in GeometricContext geometry, const in int maxMIPLevel ) {\n\t\t#include \n\t\tvec3 worldNormal = inverseTransformDirection( geometry.normal, viewMatrix );\n\t\t#ifdef ENVMAP_TYPE_CUBE\n\t\t\tvec3 queryVec = flipNormal * vec3( flipEnvMap * worldNormal.x, worldNormal.yz );\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = textureCubeLodEXT( envMap, queryVec, float( maxMIPLevel ) );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = textureCube( envMap, queryVec, float( maxMIPLevel ) );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#elif defined( ENVMAP_TYPE_CUBE_UV )\n\t\t\tvec3 queryVec = flipNormal * vec3( flipEnvMap * worldNormal.x, worldNormal.yz );\n\t\t\tvec4 envMapColor = textureCubeUV( queryVec, 1.0 );\n\t\t#else\n\t\t\tvec4 envMapColor = vec4( 0.0 );\n\t\t#endif\n\t\treturn PI * envMapColor.rgb * envMapIntensity;\n\t}\n\tfloat getSpecularMIPLevel( const in float blinnShininessExponent, const in int maxMIPLevel ) {\n\t\tfloat maxMIPLevelScalar = float( maxMIPLevel );\n\t\tfloat desiredMIPLevel = maxMIPLevelScalar - 0.79248 - 0.5 * log2( pow2( blinnShininessExponent ) + 1.0 );\n\t\treturn clamp( desiredMIPLevel, 0.0, maxMIPLevelScalar );\n\t}\n\tvec3 getLightProbeIndirectRadiance( const in GeometricContext geometry, const in float blinnShininessExponent, const in int maxMIPLevel ) {\n\t\t#ifdef ENVMAP_MODE_REFLECTION\n\t\t\tvec3 reflectVec = reflect( -geometry.viewDir, geometry.normal );\n\t\t#else\n\t\t\tvec3 reflectVec = refract( -geometry.viewDir, geometry.normal, refractionRatio );\n\t\t#endif\n\t\t#include \n\t\treflectVec = inverseTransformDirection( reflectVec, viewMatrix );\n\t\tfloat specularMIPLevel = getSpecularMIPLevel( blinnShininessExponent, maxMIPLevel );\n\t\t#ifdef ENVMAP_TYPE_CUBE\n\t\t\tvec3 queryReflectVec = flipNormal * vec3( flipEnvMap * reflectVec.x, reflectVec.yz );\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = textureCubeLodEXT( envMap, queryReflectVec, specularMIPLevel );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = textureCube( envMap, queryReflectVec, specularMIPLevel );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#elif defined( ENVMAP_TYPE_CUBE_UV )\n\t\t\tvec3 queryReflectVec = flipNormal * vec3( flipEnvMap * reflectVec.x, reflectVec.yz );\n\t\t\tvec4 envMapColor = textureCubeUV(queryReflectVec, BlinnExponentToGGXRoughness(blinnShininessExponent));\n\t\t#elif defined( ENVMAP_TYPE_EQUIREC )\n\t\t\tvec2 sampleUV;\n\t\t\tsampleUV.y = saturate( flipNormal * reflectVec.y * 0.5 + 
0.5 );\n\t\t\tsampleUV.x = atan( flipNormal * reflectVec.z, flipNormal * reflectVec.x ) * RECIPROCAL_PI2 + 0.5;\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = texture2DLodEXT( envMap, sampleUV, specularMIPLevel );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = texture2D( envMap, sampleUV, specularMIPLevel );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#elif defined( ENVMAP_TYPE_SPHERE )\n\t\t\tvec3 reflectView = flipNormal * normalize( ( viewMatrix * vec4( reflectVec, 0.0 ) ).xyz + vec3( 0.0,0.0,1.0 ) );\n\t\t\t#ifdef TEXTURE_LOD_EXT\n\t\t\t\tvec4 envMapColor = texture2DLodEXT( envMap, reflectView.xy * 0.5 + 0.5, specularMIPLevel );\n\t\t\t#else\n\t\t\t\tvec4 envMapColor = texture2D( envMap, reflectView.xy * 0.5 + 0.5, specularMIPLevel );\n\t\t\t#endif\n\t\t\tenvMapColor.rgb = envMapTexelToLinear( envMapColor ).rgb;\n\t\t#endif\n\t\treturn envMapColor.rgb * envMapIntensity;\n\t}\n#endif\n", -lights_phong_fragment:"BlinnPhongMaterial material;\nmaterial.diffuseColor = diffuseColor.rgb;\nmaterial.specularColor = specular;\nmaterial.specularShininess = shininess;\nmaterial.specularStrength = specularStrength;\n",lights_phong_pars_fragment:"varying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\nstruct BlinnPhongMaterial {\n\tvec3\tdiffuseColor;\n\tvec3\tspecularColor;\n\tfloat\tspecularShininess;\n\tfloat\tspecularStrength;\n};\nvoid RE_Direct_BlinnPhong( const in IncidentLight directLight, const in GeometricContext geometry, const in BlinnPhongMaterial material, inout ReflectedLight reflectedLight ) {\n\tfloat dotNL = saturate( dot( geometry.normal, directLight.direction ) );\n\tvec3 irradiance = dotNL * directLight.color;\n\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\tirradiance *= PI;\n\t#endif\n\treflectedLight.directDiffuse += irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n\treflectedLight.directSpecular += irradiance * BRDF_Specular_BlinnPhong( directLight, geometry, material.specularColor, material.specularShininess ) * material.specularStrength;\n}\nvoid RE_IndirectDiffuse_BlinnPhong( const in vec3 irradiance, const in GeometricContext geometry, const in BlinnPhongMaterial material, inout ReflectedLight reflectedLight ) {\n\treflectedLight.indirectDiffuse += irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n}\n#define RE_Direct\t\t\t\tRE_Direct_BlinnPhong\n#define RE_IndirectDiffuse\t\tRE_IndirectDiffuse_BlinnPhong\n#define Material_LightProbeLOD( material )\t(0)\n", -lights_physical_fragment:"PhysicalMaterial material;\nmaterial.diffuseColor = diffuseColor.rgb * ( 1.0 - metalnessFactor );\nmaterial.specularRoughness = clamp( roughnessFactor, 0.04, 1.0 );\n#ifdef STANDARD\n\tmaterial.specularColor = mix( vec3( DEFAULT_SPECULAR_COEFFICIENT ), diffuseColor.rgb, metalnessFactor );\n#else\n\tmaterial.specularColor = mix( vec3( MAXIMUM_SPECULAR_COEFFICIENT * pow2( reflectivity ) ), diffuseColor.rgb, metalnessFactor );\n\tmaterial.clearCoat = saturate( clearCoat );\tmaterial.clearCoatRoughness = clamp( clearCoatRoughness, 0.04, 1.0 );\n#endif\n", -lights_physical_pars_fragment:"struct PhysicalMaterial {\n\tvec3\tdiffuseColor;\n\tfloat\tspecularRoughness;\n\tvec3\tspecularColor;\n\t#ifndef STANDARD\n\t\tfloat clearCoat;\n\t\tfloat clearCoatRoughness;\n\t#endif\n};\n#define MAXIMUM_SPECULAR_COEFFICIENT 0.16\n#define DEFAULT_SPECULAR_COEFFICIENT 0.04\nfloat clearCoatDHRApprox( const in float roughness, const in float dotNL ) {\n\treturn DEFAULT_SPECULAR_COEFFICIENT + ( 1.0 - DEFAULT_SPECULAR_COEFFICIENT 
) * ( pow( 1.0 - dotNL, 5.0 ) * pow( 1.0 - roughness, 2.0 ) );\n}\nvoid RE_Direct_Physical( const in IncidentLight directLight, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\tfloat dotNL = saturate( dot( geometry.normal, directLight.direction ) );\n\tvec3 irradiance = dotNL * directLight.color;\n\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\tirradiance *= PI;\n\t#endif\n\t#ifndef STANDARD\n\t\tfloat clearCoatDHR = material.clearCoat * clearCoatDHRApprox( material.clearCoatRoughness, dotNL );\n\t#else\n\t\tfloat clearCoatDHR = 0.0;\n\t#endif\n\treflectedLight.directSpecular += ( 1.0 - clearCoatDHR ) * irradiance * BRDF_Specular_GGX( directLight, geometry, material.specularColor, material.specularRoughness );\n\treflectedLight.directDiffuse += ( 1.0 - clearCoatDHR ) * irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n\t#ifndef STANDARD\n\t\treflectedLight.directSpecular += irradiance * material.clearCoat * BRDF_Specular_GGX( directLight, geometry, vec3( DEFAULT_SPECULAR_COEFFICIENT ), material.clearCoatRoughness );\n\t#endif\n}\nvoid RE_IndirectDiffuse_Physical( const in vec3 irradiance, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\treflectedLight.indirectDiffuse += irradiance * BRDF_Diffuse_Lambert( material.diffuseColor );\n}\nvoid RE_IndirectSpecular_Physical( const in vec3 radiance, const in vec3 clearCoatRadiance, const in GeometricContext geometry, const in PhysicalMaterial material, inout ReflectedLight reflectedLight ) {\n\t#ifndef STANDARD\n\t\tfloat dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );\n\t\tfloat dotNL = dotNV;\n\t\tfloat clearCoatDHR = material.clearCoat * clearCoatDHRApprox( material.clearCoatRoughness, dotNL );\n\t#else\n\t\tfloat clearCoatDHR = 0.0;\n\t#endif\n\treflectedLight.indirectSpecular += ( 1.0 - clearCoatDHR ) * radiance * BRDF_Specular_GGX_Environment( geometry, material.specularColor, material.specularRoughness );\n\t#ifndef STANDARD\n\t\treflectedLight.indirectSpecular += clearCoatRadiance * material.clearCoat * BRDF_Specular_GGX_Environment( geometry, vec3( DEFAULT_SPECULAR_COEFFICIENT ), material.clearCoatRoughness );\n\t#endif\n}\n#define RE_Direct\t\t\t\tRE_Direct_Physical\n#define RE_IndirectDiffuse\t\tRE_IndirectDiffuse_Physical\n#define RE_IndirectSpecular\t\tRE_IndirectSpecular_Physical\n#define Material_BlinnShininessExponent( material ) GGXRoughnessToBlinnExponent( material.specularRoughness )\n#define Material_ClearCoat_BlinnShininessExponent( material ) GGXRoughnessToBlinnExponent( material.clearCoatRoughness )\nfloat computeSpecularOcclusion( const in float dotNV, const in float ambientOcclusion, const in float roughness ) {\n\treturn saturate( pow( dotNV + ambientOcclusion, exp2( - 16.0 * roughness - 1.0 ) ) - 1.0 + ambientOcclusion );\n}\n", -lights_template:"\nGeometricContext geometry;\ngeometry.position = - vViewPosition;\ngeometry.normal = normal;\ngeometry.viewDir = normalize( vViewPosition );\nIncidentLight directLight;\n#if ( NUM_POINT_LIGHTS > 0 ) && defined( RE_Direct )\n\tPointLight pointLight;\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tpointLight = pointLights[ i ];\n\t\tgetPointDirectLightIrradiance( pointLight, geometry, directLight );\n\t\t#ifdef USE_SHADOWMAP\n\t\tdirectLight.color *= all( bvec2( pointLight.shadow, directLight.visible ) ) ? 
getPointShadow( pointShadowMap[ i ], pointLight.shadowMapSize, pointLight.shadowBias, pointLight.shadowRadius, vPointShadowCoord[ i ] ) : 1.0;\n\t\t#endif\n\t\tRE_Direct( directLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if ( NUM_SPOT_LIGHTS > 0 ) && defined( RE_Direct )\n\tSpotLight spotLight;\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tspotLight = spotLights[ i ];\n\t\tgetSpotDirectLightIrradiance( spotLight, geometry, directLight );\n\t\t#ifdef USE_SHADOWMAP\n\t\tdirectLight.color *= all( bvec2( spotLight.shadow, directLight.visible ) ) ? getShadow( spotShadowMap[ i ], spotLight.shadowMapSize, spotLight.shadowBias, spotLight.shadowRadius, vSpotShadowCoord[ i ] ) : 1.0;\n\t\t#endif\n\t\tRE_Direct( directLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if ( NUM_DIR_LIGHTS > 0 ) && defined( RE_Direct )\n\tDirectionalLight directionalLight;\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tdirectionalLight = directionalLights[ i ];\n\t\tgetDirectionalDirectLightIrradiance( directionalLight, geometry, directLight );\n\t\t#ifdef USE_SHADOWMAP\n\t\tdirectLight.color *= all( bvec2( directionalLight.shadow, directLight.visible ) ) ? getShadow( directionalShadowMap[ i ], directionalLight.shadowMapSize, directionalLight.shadowBias, directionalLight.shadowRadius, vDirectionalShadowCoord[ i ] ) : 1.0;\n\t\t#endif\n\t\tRE_Direct( directLight, geometry, material, reflectedLight );\n\t}\n#endif\n#if defined( RE_IndirectDiffuse )\n\tvec3 irradiance = getAmbientLightIrradiance( ambientLightColor );\n\t#ifdef USE_LIGHTMAP\n\t\tvec3 lightMapIrradiance = texture2D( lightMap, vUv2 ).xyz * lightMapIntensity;\n\t\t#ifndef PHYSICALLY_CORRECT_LIGHTS\n\t\t\tlightMapIrradiance *= PI;\n\t\t#endif\n\t\tirradiance += lightMapIrradiance;\n\t#endif\n\t#if ( NUM_HEMI_LIGHTS > 0 )\n\t\tfor ( int i = 0; i < NUM_HEMI_LIGHTS; i ++ ) {\n\t\t\tirradiance += getHemisphereLightIrradiance( hemisphereLights[ i ], geometry );\n\t\t}\n\t#endif\n\t#if defined( USE_ENVMAP ) && defined( PHYSICAL ) && defined( ENVMAP_TYPE_CUBE_UV )\n\t \tirradiance += getLightProbeIndirectIrradiance( geometry, 8 );\n\t#endif\n\tRE_IndirectDiffuse( irradiance, geometry, material, reflectedLight );\n#endif\n#if defined( USE_ENVMAP ) && defined( RE_IndirectSpecular )\n\tvec3 radiance = getLightProbeIndirectRadiance( geometry, Material_BlinnShininessExponent( material ), 8 );\n\t#ifndef STANDARD\n\t\tvec3 clearCoatRadiance = getLightProbeIndirectRadiance( geometry, Material_ClearCoat_BlinnShininessExponent( material ), 8 );\n\t#else\n\t\tvec3 clearCoatRadiance = vec3( 0.0 );\n\t#endif\n\t\t\n\tRE_IndirectSpecular( radiance, clearCoatRadiance, geometry, material, reflectedLight );\n#endif\n", -logdepthbuf_fragment:"#if defined(USE_LOGDEPTHBUF) && defined(USE_LOGDEPTHBUF_EXT)\n\tgl_FragDepthEXT = log2(vFragDepth) * logDepthBufFC * 0.5;\n#endif",logdepthbuf_pars_fragment:"#ifdef USE_LOGDEPTHBUF\n\tuniform float logDepthBufFC;\n\t#ifdef USE_LOGDEPTHBUF_EXT\n\t\tvarying float vFragDepth;\n\t#endif\n#endif\n",logdepthbuf_pars_vertex:"#ifdef USE_LOGDEPTHBUF\n\t#ifdef USE_LOGDEPTHBUF_EXT\n\t\tvarying float vFragDepth;\n\t#endif\n\tuniform float logDepthBufFC;\n#endif",logdepthbuf_vertex:"#ifdef USE_LOGDEPTHBUF\n\tgl_Position.z = log2(max( EPSILON, gl_Position.w + 1.0 )) * logDepthBufFC;\n\t#ifdef USE_LOGDEPTHBUF_EXT\n\t\tvFragDepth = 1.0 + gl_Position.w;\n\t#else\n\t\tgl_Position.z = (gl_Position.z - 1.0) * gl_Position.w;\n\t#endif\n#endif\n", -map_fragment:"#ifdef USE_MAP\n\tvec4 texelColor = texture2D( map, vUv 
);\n\ttexelColor = mapTexelToLinear( texelColor );\n\tdiffuseColor *= texelColor;\n#endif\n",map_pars_fragment:"#ifdef USE_MAP\n\tuniform sampler2D map;\n#endif\n",map_particle_fragment:"#ifdef USE_MAP\n\tvec4 mapTexel = texture2D( map, vec2( gl_PointCoord.x, 1.0 - gl_PointCoord.y ) * offsetRepeat.zw + offsetRepeat.xy );\n\tdiffuseColor *= mapTexelToLinear( mapTexel );\n#endif\n",map_particle_pars_fragment:"#ifdef USE_MAP\n\tuniform vec4 offsetRepeat;\n\tuniform sampler2D map;\n#endif\n", -metalnessmap_fragment:"float metalnessFactor = metalness;\n#ifdef USE_METALNESSMAP\n\tvec4 texelMetalness = texture2D( metalnessMap, vUv );\n\tmetalnessFactor *= texelMetalness.r;\n#endif\n",metalnessmap_pars_fragment:"#ifdef USE_METALNESSMAP\n\tuniform sampler2D metalnessMap;\n#endif",morphnormal_vertex:"#ifdef USE_MORPHNORMALS\n\tobjectNormal += ( morphNormal0 - normal ) * morphTargetInfluences[ 0 ];\n\tobjectNormal += ( morphNormal1 - normal ) * morphTargetInfluences[ 1 ];\n\tobjectNormal += ( morphNormal2 - normal ) * morphTargetInfluences[ 2 ];\n\tobjectNormal += ( morphNormal3 - normal ) * morphTargetInfluences[ 3 ];\n#endif\n", -morphtarget_pars_vertex:"#ifdef USE_MORPHTARGETS\n\t#ifndef USE_MORPHNORMALS\n\tuniform float morphTargetInfluences[ 8 ];\n\t#else\n\tuniform float morphTargetInfluences[ 4 ];\n\t#endif\n#endif",morphtarget_vertex:"#ifdef USE_MORPHTARGETS\n\ttransformed += ( morphTarget0 - position ) * morphTargetInfluences[ 0 ];\n\ttransformed += ( morphTarget1 - position ) * morphTargetInfluences[ 1 ];\n\ttransformed += ( morphTarget2 - position ) * morphTargetInfluences[ 2 ];\n\ttransformed += ( morphTarget3 - position ) * morphTargetInfluences[ 3 ];\n\t#ifndef USE_MORPHNORMALS\n\ttransformed += ( morphTarget4 - position ) * morphTargetInfluences[ 4 ];\n\ttransformed += ( morphTarget5 - position ) * morphTargetInfluences[ 5 ];\n\ttransformed += ( morphTarget6 - position ) * morphTargetInfluences[ 6 ];\n\ttransformed += ( morphTarget7 - position ) * morphTargetInfluences[ 7 ];\n\t#endif\n#endif\n", -normal_flip:"#ifdef DOUBLE_SIDED\n\tfloat flipNormal = ( float( gl_FrontFacing ) * 2.0 - 1.0 );\n#else\n\tfloat flipNormal = 1.0;\n#endif\n",normal_fragment:"#ifdef FLAT_SHADED\n\tvec3 fdx = vec3( dFdx( vViewPosition.x ), dFdx( vViewPosition.y ), dFdx( vViewPosition.z ) );\n\tvec3 fdy = vec3( dFdy( vViewPosition.x ), dFdy( vViewPosition.y ), dFdy( vViewPosition.z ) );\n\tvec3 normal = normalize( cross( fdx, fdy ) );\n#else\n\tvec3 normal = normalize( vNormal ) * flipNormal;\n#endif\n#ifdef USE_NORMALMAP\n\tnormal = perturbNormal2Arb( -vViewPosition, normal );\n#elif defined( USE_BUMPMAP )\n\tnormal = perturbNormalArb( -vViewPosition, normal, dHdxy_fwd() );\n#endif\n", -normalmap_pars_fragment:"#ifdef USE_NORMALMAP\n\tuniform sampler2D normalMap;\n\tuniform vec2 normalScale;\n\tvec3 perturbNormal2Arb( vec3 eye_pos, vec3 surf_norm ) {\n\t\tvec3 q0 = dFdx( eye_pos.xyz );\n\t\tvec3 q1 = dFdy( eye_pos.xyz );\n\t\tvec2 st0 = dFdx( vUv.st );\n\t\tvec2 st1 = dFdy( vUv.st );\n\t\tvec3 S = normalize( q0 * st1.t - q1 * st0.t );\n\t\tvec3 T = normalize( -q0 * st1.s + q1 * st0.s );\n\t\tvec3 N = normalize( surf_norm );\n\t\tvec3 mapN = texture2D( normalMap, vUv ).xyz * 2.0 - 1.0;\n\t\tmapN.xy = normalScale * mapN.xy;\n\t\tmat3 tsn = mat3( S, T, N );\n\t\treturn normalize( tsn * mapN );\n\t}\n#endif\n", -packing:"vec3 packNormalToRGB( const in vec3 normal ) {\n return normalize( normal ) * 0.5 + 0.5;\n}\nvec3 unpackRGBToNormal( const in vec3 rgb ) {\n return 1.0 - 2.0 * rgb.xyz;\n}\nconst float PackUpscale 
= 256. / 255.;const float UnpackDownscale = 255. / 256.;\nconst vec3 PackFactors = vec3( 256. * 256. * 256., 256. * 256., 256. );\nconst vec4 UnpackFactors = UnpackDownscale / vec4( PackFactors, 1. );\nconst float ShiftRight8 = 1. / 256.;\nvec4 packDepthToRGBA( const in float v ) {\n\tvec4 r = vec4( fract( v * PackFactors ), v );\n\tr.yzw -= r.xyz * ShiftRight8;\treturn r * PackUpscale;\n}\nfloat unpackRGBAToDepth( const in vec4 v ) {\n\treturn dot( v, UnpackFactors );\n}\nfloat viewZToOrthographicDepth( const in float viewZ, const in float near, const in float far ) {\n return ( viewZ + near ) / ( near - far );\n}\nfloat orthographicDepthToViewZ( const in float linearClipZ, const in float near, const in float far ) {\n return linearClipZ * ( near - far ) - near;\n}\nfloat viewZToPerspectiveDepth( const in float viewZ, const in float near, const in float far ) {\n return (( near + viewZ ) * far ) / (( far - near ) * viewZ );\n}\nfloat perspectiveDepthToViewZ( const in float invClipZ, const in float near, const in float far ) {\n return ( near * far ) / ( ( far - near ) * invClipZ - far );\n}\n", -premultiplied_alpha_fragment:"#ifdef PREMULTIPLIED_ALPHA\n\tgl_FragColor.rgb *= gl_FragColor.a;\n#endif\n",project_vertex:"#ifdef USE_SKINNING\n\tvec4 mvPosition = modelViewMatrix * skinned;\n#else\n\tvec4 mvPosition = modelViewMatrix * vec4( transformed, 1.0 );\n#endif\ngl_Position = projectionMatrix * mvPosition;\n",roughnessmap_fragment:"float roughnessFactor = roughness;\n#ifdef USE_ROUGHNESSMAP\n\tvec4 texelRoughness = texture2D( roughnessMap, vUv );\n\troughnessFactor *= texelRoughness.r;\n#endif\n", -roughnessmap_pars_fragment:"#ifdef USE_ROUGHNESSMAP\n\tuniform sampler2D roughnessMap;\n#endif",shadowmap_pars_fragment:"#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\t\tuniform sampler2D directionalShadowMap[ NUM_DIR_LIGHTS ];\n\t\tvarying vec4 vDirectionalShadowCoord[ NUM_DIR_LIGHTS ];\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\t\tuniform sampler2D spotShadowMap[ NUM_SPOT_LIGHTS ];\n\t\tvarying vec4 vSpotShadowCoord[ NUM_SPOT_LIGHTS ];\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\t\tuniform sampler2D pointShadowMap[ NUM_POINT_LIGHTS ];\n\t\tvarying vec4 vPointShadowCoord[ NUM_POINT_LIGHTS ];\n\t#endif\n\tfloat texture2DCompare( sampler2D depths, vec2 uv, float compare ) {\n\t\treturn step( compare, unpackRGBAToDepth( texture2D( depths, uv ) ) );\n\t}\n\tfloat texture2DShadowLerp( sampler2D depths, vec2 size, vec2 uv, float compare ) {\n\t\tconst vec2 offset = vec2( 0.0, 1.0 );\n\t\tvec2 texelSize = vec2( 1.0 ) / size;\n\t\tvec2 centroidUV = floor( uv * size + 0.5 ) / size;\n\t\tfloat lb = texture2DCompare( depths, centroidUV + texelSize * offset.xx, compare );\n\t\tfloat lt = texture2DCompare( depths, centroidUV + texelSize * offset.xy, compare );\n\t\tfloat rb = texture2DCompare( depths, centroidUV + texelSize * offset.yx, compare );\n\t\tfloat rt = texture2DCompare( depths, centroidUV + texelSize * offset.yy, compare );\n\t\tvec2 f = fract( uv * size + 0.5 );\n\t\tfloat a = mix( lb, lt, f.y );\n\t\tfloat b = mix( rb, rt, f.y );\n\t\tfloat c = mix( a, b, f.x );\n\t\treturn c;\n\t}\n\tfloat getShadow( sampler2D shadowMap, vec2 shadowMapSize, float shadowBias, float shadowRadius, vec4 shadowCoord ) {\n\t\tshadowCoord.xyz /= shadowCoord.w;\n\t\tshadowCoord.z += shadowBias;\n\t\tbvec4 inFrustumVec = bvec4 ( shadowCoord.x >= 0.0, shadowCoord.x <= 1.0, shadowCoord.y >= 0.0, shadowCoord.y <= 1.0 );\n\t\tbool inFrustum = all( inFrustumVec );\n\t\tbvec2 frustumTestVec = bvec2( inFrustum, shadowCoord.z <= 
1.0 );\n\t\tbool frustumTest = all( frustumTestVec );\n\t\tif ( frustumTest ) {\n\t\t#if defined( SHADOWMAP_TYPE_PCF )\n\t\t\tvec2 texelSize = vec2( 1.0 ) / shadowMapSize;\n\t\t\tfloat dx0 = - texelSize.x * shadowRadius;\n\t\t\tfloat dy0 = - texelSize.y * shadowRadius;\n\t\t\tfloat dx1 = + texelSize.x * shadowRadius;\n\t\t\tfloat dy1 = + texelSize.y * shadowRadius;\n\t\t\treturn (\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( 0.0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx1, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx0, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy, shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx1, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( 0.0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DCompare( shadowMap, shadowCoord.xy + vec2( dx1, dy1 ), shadowCoord.z )\n\t\t\t) * ( 1.0 / 9.0 );\n\t\t#elif defined( SHADOWMAP_TYPE_PCF_SOFT )\n\t\t\tvec2 texelSize = vec2( 1.0 ) / shadowMapSize;\n\t\t\tfloat dx0 = - texelSize.x * shadowRadius;\n\t\t\tfloat dy0 = - texelSize.y * shadowRadius;\n\t\t\tfloat dx1 = + texelSize.x * shadowRadius;\n\t\t\tfloat dy1 = + texelSize.y * shadowRadius;\n\t\t\treturn (\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( 0.0, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx1, dy0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx0, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy, shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx1, 0.0 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( 0.0, dy1 ), shadowCoord.z ) +\n\t\t\t\ttexture2DShadowLerp( shadowMap, shadowMapSize, shadowCoord.xy + vec2( dx1, dy1 ), shadowCoord.z )\n\t\t\t) * ( 1.0 / 9.0 );\n\t\t#else\n\t\t\treturn texture2DCompare( shadowMap, shadowCoord.xy, shadowCoord.z );\n\t\t#endif\n\t\t}\n\t\treturn 1.0;\n\t}\n\tvec2 cubeToUV( vec3 v, float texelSizeY ) {\n\t\tvec3 absV = abs( v );\n\t\tfloat scaleToCube = 1.0 / max( absV.x, max( absV.y, absV.z ) );\n\t\tabsV *= scaleToCube;\n\t\tv *= scaleToCube * ( 1.0 - 2.0 * texelSizeY );\n\t\tvec2 planar = v.xy;\n\t\tfloat almostATexel = 1.5 * texelSizeY;\n\t\tfloat almostOne = 1.0 - almostATexel;\n\t\tif ( absV.z >= almostOne ) {\n\t\t\tif ( v.z > 0.0 )\n\t\t\t\tplanar.x = 4.0 - v.x;\n\t\t} else if ( absV.x >= almostOne ) {\n\t\t\tfloat signX = sign( v.x );\n\t\t\tplanar.x = v.z * signX + 2.0 * signX;\n\t\t} else if ( absV.y >= almostOne ) {\n\t\t\tfloat signY = sign( v.y );\n\t\t\tplanar.x = v.x + 2.0 * signY + 2.0;\n\t\t\tplanar.y = v.z * signY - 2.0;\n\t\t}\n\t\treturn vec2( 0.125, 0.25 ) * planar + vec2( 0.375, 0.75 );\n\t}\n\tfloat getPointShadow( sampler2D shadowMap, vec2 shadowMapSize, float shadowBias, float shadowRadius, vec4 shadowCoord ) {\n\t\tvec2 texelSize = 
vec2( 1.0 ) / ( shadowMapSize * vec2( 4.0, 2.0 ) );\n\t\tvec3 lightToPosition = shadowCoord.xyz;\n\t\tvec3 bd3D = normalize( lightToPosition );\n\t\tfloat dp = ( length( lightToPosition ) - shadowBias ) / 1000.0;\n\t\t#if defined( SHADOWMAP_TYPE_PCF ) || defined( SHADOWMAP_TYPE_PCF_SOFT )\n\t\t\tvec2 offset = vec2( - 1, 1 ) * shadowRadius * texelSize.y;\n\t\t\treturn (\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xyy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yyy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xyx, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yyx, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xxy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yxy, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.xxx, texelSize.y ), dp ) +\n\t\t\t\ttexture2DCompare( shadowMap, cubeToUV( bd3D + offset.yxx, texelSize.y ), dp )\n\t\t\t) * ( 1.0 / 9.0 );\n\t\t#else\n\t\t\treturn texture2DCompare( shadowMap, cubeToUV( bd3D, texelSize.y ), dp );\n\t\t#endif\n\t}\n#endif\n", -shadowmap_pars_vertex:"#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\t\tuniform mat4 directionalShadowMatrix[ NUM_DIR_LIGHTS ];\n\t\tvarying vec4 vDirectionalShadowCoord[ NUM_DIR_LIGHTS ];\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\t\tuniform mat4 spotShadowMatrix[ NUM_SPOT_LIGHTS ];\n\t\tvarying vec4 vSpotShadowCoord[ NUM_SPOT_LIGHTS ];\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\t\tuniform mat4 pointShadowMatrix[ NUM_POINT_LIGHTS ];\n\t\tvarying vec4 vPointShadowCoord[ NUM_POINT_LIGHTS ];\n\t#endif\n#endif\n", -shadowmap_vertex:"#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tvDirectionalShadowCoord[ i ] = directionalShadowMatrix[ i ] * worldPosition;\n\t}\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tvSpotShadowCoord[ i ] = spotShadowMatrix[ i ] * worldPosition;\n\t}\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tvPointShadowCoord[ i ] = pointShadowMatrix[ i ] * worldPosition;\n\t}\n\t#endif\n#endif\n", -shadowmask_pars_fragment:"float getShadowMask() {\n\tfloat shadow = 1.0;\n\t#ifdef USE_SHADOWMAP\n\t#if NUM_DIR_LIGHTS > 0\n\tDirectionalLight directionalLight;\n\tfor ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) {\n\t\tdirectionalLight = directionalLights[ i ];\n\t\tshadow *= bool( directionalLight.shadow ) ? getShadow( directionalShadowMap[ i ], directionalLight.shadowMapSize, directionalLight.shadowBias, directionalLight.shadowRadius, vDirectionalShadowCoord[ i ] ) : 1.0;\n\t}\n\t#endif\n\t#if NUM_SPOT_LIGHTS > 0\n\tSpotLight spotLight;\n\tfor ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) {\n\t\tspotLight = spotLights[ i ];\n\t\tshadow *= bool( spotLight.shadow ) ? getShadow( spotShadowMap[ i ], spotLight.shadowMapSize, spotLight.shadowBias, spotLight.shadowRadius, vSpotShadowCoord[ i ] ) : 1.0;\n\t}\n\t#endif\n\t#if NUM_POINT_LIGHTS > 0\n\tPointLight pointLight;\n\tfor ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) {\n\t\tpointLight = pointLights[ i ];\n\t\tshadow *= bool( pointLight.shadow ) ? 
getPointShadow( pointShadowMap[ i ], pointLight.shadowMapSize, pointLight.shadowBias, pointLight.shadowRadius, vPointShadowCoord[ i ] ) : 1.0;\n\t}\n\t#endif\n\t#endif\n\treturn shadow;\n}\n", -skinbase_vertex:"#ifdef USE_SKINNING\n\tmat4 boneMatX = getBoneMatrix( skinIndex.x );\n\tmat4 boneMatY = getBoneMatrix( skinIndex.y );\n\tmat4 boneMatZ = getBoneMatrix( skinIndex.z );\n\tmat4 boneMatW = getBoneMatrix( skinIndex.w );\n#endif",skinning_pars_vertex:"#ifdef USE_SKINNING\n\tuniform mat4 bindMatrix;\n\tuniform mat4 bindMatrixInverse;\n\t#ifdef BONE_TEXTURE\n\t\tuniform sampler2D boneTexture;\n\t\tuniform int boneTextureWidth;\n\t\tuniform int boneTextureHeight;\n\t\tmat4 getBoneMatrix( const in float i ) {\n\t\t\tfloat j = i * 4.0;\n\t\t\tfloat x = mod( j, float( boneTextureWidth ) );\n\t\t\tfloat y = floor( j / float( boneTextureWidth ) );\n\t\t\tfloat dx = 1.0 / float( boneTextureWidth );\n\t\t\tfloat dy = 1.0 / float( boneTextureHeight );\n\t\t\ty = dy * ( y + 0.5 );\n\t\t\tvec4 v1 = texture2D( boneTexture, vec2( dx * ( x + 0.5 ), y ) );\n\t\t\tvec4 v2 = texture2D( boneTexture, vec2( dx * ( x + 1.5 ), y ) );\n\t\t\tvec4 v3 = texture2D( boneTexture, vec2( dx * ( x + 2.5 ), y ) );\n\t\t\tvec4 v4 = texture2D( boneTexture, vec2( dx * ( x + 3.5 ), y ) );\n\t\t\tmat4 bone = mat4( v1, v2, v3, v4 );\n\t\t\treturn bone;\n\t\t}\n\t#else\n\t\tuniform mat4 boneMatrices[ MAX_BONES ];\n\t\tmat4 getBoneMatrix( const in float i ) {\n\t\t\tmat4 bone = boneMatrices[ int(i) ];\n\t\t\treturn bone;\n\t\t}\n\t#endif\n#endif\n", -skinning_vertex:"#ifdef USE_SKINNING\n\tvec4 skinVertex = bindMatrix * vec4( transformed, 1.0 );\n\tvec4 skinned = vec4( 0.0 );\n\tskinned += boneMatX * skinVertex * skinWeight.x;\n\tskinned += boneMatY * skinVertex * skinWeight.y;\n\tskinned += boneMatZ * skinVertex * skinWeight.z;\n\tskinned += boneMatW * skinVertex * skinWeight.w;\n\tskinned = bindMatrixInverse * skinned;\n#endif\n",skinnormal_vertex:"#ifdef USE_SKINNING\n\tmat4 skinMatrix = mat4( 0.0 );\n\tskinMatrix += skinWeight.x * boneMatX;\n\tskinMatrix += skinWeight.y * boneMatY;\n\tskinMatrix += skinWeight.z * boneMatZ;\n\tskinMatrix += skinWeight.w * boneMatW;\n\tskinMatrix = bindMatrixInverse * skinMatrix * bindMatrix;\n\tobjectNormal = vec4( skinMatrix * vec4( objectNormal, 0.0 ) ).xyz;\n#endif\n", -specularmap_fragment:"float specularStrength;\n#ifdef USE_SPECULARMAP\n\tvec4 texelSpecular = texture2D( specularMap, vUv );\n\tspecularStrength = texelSpecular.r;\n#else\n\tspecularStrength = 1.0;\n#endif",specularmap_pars_fragment:"#ifdef USE_SPECULARMAP\n\tuniform sampler2D specularMap;\n#endif",tonemapping_fragment:"#if defined( TONE_MAPPING )\n gl_FragColor.rgb = toneMapping( gl_FragColor.rgb );\n#endif\n",tonemapping_pars_fragment:"#define saturate(a) clamp( a, 0.0, 1.0 )\nuniform float toneMappingExposure;\nuniform float toneMappingWhitePoint;\nvec3 LinearToneMapping( vec3 color ) {\n return toneMappingExposure * color;\n}\nvec3 ReinhardToneMapping( vec3 color ) {\n color *= toneMappingExposure;\n return saturate( color / ( vec3( 1.0 ) + color ) );\n}\n#define Uncharted2Helper( x ) max( ( ( x * ( 0.15 * x + 0.10 * 0.50 ) + 0.20 * 0.02 ) / ( x * ( 0.15 * x + 0.50 ) + 0.20 * 0.30 ) ) - 0.02 / 0.30, vec3( 0.0 ) )\nvec3 Uncharted2ToneMapping( vec3 color ) {\n color *= toneMappingExposure;\n return saturate( Uncharted2Helper( color ) / Uncharted2Helper( vec3( toneMappingWhitePoint ) ) );\n}\nvec3 OptimizedCineonToneMapping( vec3 color ) {\n color *= toneMappingExposure;\n color = max( vec3( 0.0 ), color - 0.004 );\n 
return pow( ( color * ( 6.2 * color + 0.5 ) ) / ( color * ( 6.2 * color + 1.7 ) + 0.06 ), vec3( 2.2 ) );\n}\n", -uv_pars_fragment:"#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )\n\tvarying vec2 vUv;\n#endif",uv_pars_vertex:"#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )\n\tvarying vec2 vUv;\n\tuniform vec4 offsetRepeat;\n#endif\n", -uv_vertex:"#if defined( USE_MAP ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP ) || defined( USE_SPECULARMAP ) || defined( USE_ALPHAMAP ) || defined( USE_EMISSIVEMAP ) || defined( USE_ROUGHNESSMAP ) || defined( USE_METALNESSMAP )\n\tvUv = uv * offsetRepeat.zw + offsetRepeat.xy;\n#endif",uv2_pars_fragment:"#if defined( USE_LIGHTMAP ) || defined( USE_AOMAP )\n\tvarying vec2 vUv2;\n#endif",uv2_pars_vertex:"#if defined( USE_LIGHTMAP ) || defined( USE_AOMAP )\n\tattribute vec2 uv2;\n\tvarying vec2 vUv2;\n#endif", -uv2_vertex:"#if defined( USE_LIGHTMAP ) || defined( USE_AOMAP )\n\tvUv2 = uv2;\n#endif",worldpos_vertex:"#if defined( USE_ENVMAP ) || defined( PHONG ) || defined( PHYSICAL ) || defined( LAMBERT ) || defined ( USE_SHADOWMAP )\n\t#ifdef USE_SKINNING\n\t\tvec4 worldPosition = modelMatrix * skinned;\n\t#else\n\t\tvec4 worldPosition = modelMatrix * vec4( transformed, 1.0 );\n\t#endif\n#endif\n",cube_frag:"uniform samplerCube tCube;\nuniform float tFlip;\nuniform float opacity;\nvarying vec3 vWorldPosition;\n#include \nvoid main() {\n\tgl_FragColor = textureCube( tCube, vec3( tFlip * vWorldPosition.x, vWorldPosition.yz ) );\n\tgl_FragColor.a *= opacity;\n}\n", -cube_vert:"varying vec3 vWorldPosition;\n#include \nvoid main() {\n\tvWorldPosition = transformDirection( position, modelMatrix );\n\t#include \n\t#include \n}\n",depth_frag:"#if DEPTH_PACKING == 3200\n\tuniform float opacity;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvec4 diffuseColor = vec4( 1.0 );\n\t#if DEPTH_PACKING == 3200\n\t\tdiffuseColor.a = opacity;\n\t#endif\n\t#include \n\t#include \n\t#include \n\t#include \n\t#if DEPTH_PACKING == 3200\n\t\tgl_FragColor = vec4( vec3( gl_FragCoord.z ), opacity );\n\t#elif DEPTH_PACKING == 3201\n\t\tgl_FragColor = packDepthToRGBA( gl_FragCoord.z );\n\t#endif\n}\n", -depth_vert:"#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -distanceRGBA_frag:"uniform vec3 lightPos;\nvarying vec4 vWorldPosition;\n#include \n#include \n#include \nvoid main () {\n\t#include \n\tgl_FragColor = packDepthToRGBA( length( vWorldPosition.xyz - lightPos.xyz ) / 1000.0 );\n}\n",distanceRGBA_vert:"varying vec4 vWorldPosition;\n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\tvWorldPosition = worldPosition;\n}\n", -equirect_frag:"uniform sampler2D tEquirect;\nuniform float tFlip;\nvarying vec3 vWorldPosition;\n#include \nvoid main() {\n\tvec3 direction = normalize( vWorldPosition );\n\tvec2 sampleUV;\n\tsampleUV.y = saturate( tFlip * direction.y * -0.5 + 0.5 );\n\tsampleUV.x = atan( 
direction.z, direction.x ) * RECIPROCAL_PI2 + 0.5;\n\tgl_FragColor = texture2D( tEquirect, sampleUV );\n}\n",equirect_vert:"varying vec3 vWorldPosition;\n#include \nvoid main() {\n\tvWorldPosition = transformDirection( position, modelMatrix );\n\t#include \n\t#include \n}\n", -linedashed_frag:"uniform vec3 diffuse;\nuniform float opacity;\nuniform float dashSize;\nuniform float totalSize;\nvarying float vLineDistance;\n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tif ( mod( vLineDistance, totalSize ) > dashSize ) {\n\t\tdiscard;\n\t}\n\tvec3 outgoingLight = vec3( 0.0 );\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include \n\t#include \n\toutgoingLight = diffuseColor.rgb;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -linedashed_vert:"uniform float scale;\nattribute float lineDistance;\nvarying float vLineDistance;\n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvLineDistance = scale * lineDistance;\n\tvec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );\n\tgl_Position = projectionMatrix * mvPosition;\n\t#include \n\t#include \n}\n",meshbasic_frag:"uniform vec3 diffuse;\nuniform float opacity;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\tReflectedLight reflectedLight;\n\treflectedLight.directDiffuse = vec3( 0.0 );\n\treflectedLight.directSpecular = vec3( 0.0 );\n\treflectedLight.indirectDiffuse = diffuseColor.rgb;\n\treflectedLight.indirectSpecular = vec3( 0.0 );\n\t#include \n\tvec3 outgoingLight = reflectedLight.indirectDiffuse;\n\t#include \n\t#include \n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -meshbasic_vert:"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n\t#ifdef USE_ENVMAP\n\t#include \n\t#include \n\t#include \n\t#include \n\t#endif\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -meshlambert_frag:"uniform vec3 diffuse;\nuniform vec3 emissive;\nuniform float opacity;\nvarying vec3 vLightFront;\n#ifdef DOUBLE_SIDED\n\tvarying vec3 vLightBack;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\tvec3 totalEmissiveRadiance = emissive;\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\treflectedLight.indirectDiffuse = getAmbientLightIrradiance( ambientLightColor );\n\t#include \n\treflectedLight.indirectDiffuse *= BRDF_Diffuse_Lambert( diffuseColor.rgb );\n\t#ifdef DOUBLE_SIDED\n\t\treflectedLight.directDiffuse = ( gl_FrontFacing ) ? 
vLightFront : vLightBack;\n\t#else\n\t\treflectedLight.directDiffuse = vLightFront;\n\t#endif\n\treflectedLight.directDiffuse *= BRDF_Diffuse_Lambert( diffuseColor.rgb ) * getShadowMask();\n\t#include \n\tvec3 outgoingLight = reflectedLight.directDiffuse + reflectedLight.indirectDiffuse + totalEmissiveRadiance;\n\t#include \n\t#include \n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -meshlambert_vert:"#define LAMBERT\nvarying vec3 vLightFront;\n#ifdef DOUBLE_SIDED\n\tvarying vec3 vLightBack;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -meshphong_frag:"#define PHONG\nuniform vec3 diffuse;\nuniform vec3 emissive;\nuniform vec3 specular;\nuniform float shininess;\nuniform float opacity;\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\tvec3 totalEmissiveRadiance = emissive;\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\tvec3 outgoingLight = reflectedLight.directDiffuse + reflectedLight.indirectDiffuse + reflectedLight.directSpecular + reflectedLight.indirectSpecular + totalEmissiveRadiance;\n\t#include \n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -meshphong_vert:"#define PHONG\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n#ifndef FLAT_SHADED\n\tvNormal = normalize( transformedNormal );\n#endif\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\tvViewPosition = - mvPosition.xyz;\n\t#include \n\t#include \n\t#include \n}\n", -meshphysical_frag:"#define PHYSICAL\nuniform vec3 diffuse;\nuniform vec3 emissive;\nuniform float roughness;\nuniform float metalness;\nuniform float opacity;\n#ifndef STANDARD\n\tuniform float clearCoat;\n\tuniform float clearCoatRoughness;\n#endif\nuniform float envMapIntensity;\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\tReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );\n\tvec3 totalEmissiveRadiance = emissive;\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include 
\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\tvec3 outgoingLight = reflectedLight.directDiffuse + reflectedLight.indirectDiffuse + reflectedLight.directSpecular + reflectedLight.indirectSpecular + totalEmissiveRadiance;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -meshphysical_vert:"#define PHYSICAL\nvarying vec3 vViewPosition;\n#ifndef FLAT_SHADED\n\tvarying vec3 vNormal;\n#endif\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n#ifndef FLAT_SHADED\n\tvNormal = normalize( transformedNormal );\n#endif\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n\tvViewPosition = - mvPosition.xyz;\n\t#include \n\t#include \n}\n", -normal_frag:"uniform float opacity;\nvarying vec3 vNormal;\n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tgl_FragColor = vec4( packNormalToRGB( vNormal ), opacity );\n\t#include \n}\n",normal_vert:"varying vec3 vNormal;\n#include \n#include \n#include \n#include \nvoid main() {\n\tvNormal = normalize( normalMatrix * normal );\n\t#include \n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -points_frag:"uniform vec3 diffuse;\nuniform float opacity;\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\tvec3 outgoingLight = vec3( 0.0 );\n\tvec4 diffuseColor = vec4( diffuse, opacity );\n\t#include \n\t#include \n\t#include \n\t#include \n\toutgoingLight = diffuseColor.rgb;\n\tgl_FragColor = vec4( outgoingLight, diffuseColor.a );\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -points_vert:"uniform float size;\nuniform float scale;\n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#ifdef USE_SIZEATTENUATION\n\t\tgl_PointSize = size * ( scale / - mvPosition.z );\n\t#else\n\t\tgl_PointSize = size;\n\t#endif\n\t#include \n\t#include \n\t#include \n\t#include \n}\n", -shadow_frag:"uniform float opacity;\n#include \n#include \n#include \n#include \n#include \n#include \nvoid main() {\n\tgl_FragColor = vec4( 0.0, 0.0, 0.0, opacity * ( 1.0 - getShadowMask() ) );\n}\n",shadow_vert:"#include \nvoid main() {\n\t#include \n\t#include \n\t#include \n\t#include \n}\n"};O.prototype={constructor:O, -isColor:!0,r:1,g:1,b:1,set:function(a){a&&a.isColor?this.copy(a):"number"===typeof a?this.setHex(a):"string"===typeof a&&this.setStyle(a);return this},setScalar:function(a){this.b=this.g=this.r=a;return this},setHex:function(a){a=Math.floor(a);this.r=(a>>16&255)/255;this.g=(a>>8&255)/255;this.b=(a&255)/255;return this},setRGB:function(a,b,c){this.r=a;this.g=b;this.b=c;return this},setHSL:function(){function a(a,c,d){0>d&&(d+=1);1d?c:d<2/3?a+6*(c-a)*(2/3-d):a}return function(b, -c,d){b=T.euclideanModulo(b,1);c=T.clamp(c,0,1);d=T.clamp(d,0,1);0===c?this.r=this.g=this.b=d:(c=.5>=d?d*(1+c):d+c-d*c,d=2*d-c,this.r=a(d,c,b+1/3),this.g=a(d,c,b),this.b=a(d,c,b-1/3));return this}}(),setStyle:function(a){function b(b){void 0!==b&&1>parseFloat(b)&&console.warn("THREE.Color: Alpha component of "+a+" will be ignored.")}var c;if(c=/^((?:rgb|hsl)a?)\(\s*([^\)]*)\)/.exec(a)){var d=c[2];switch(c[1]){case "rgb":case 
"rgba":if(c=/^(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$/.exec(d))return this.r= -Math.min(255,parseInt(c[1],10))/255,this.g=Math.min(255,parseInt(c[2],10))/255,this.b=Math.min(255,parseInt(c[3],10))/255,b(c[5]),this;if(c=/^(\d+)\%\s*,\s*(\d+)\%\s*,\s*(\d+)\%\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$/.exec(d))return this.r=Math.min(100,parseInt(c[1],10))/100,this.g=Math.min(100,parseInt(c[2],10))/100,this.b=Math.min(100,parseInt(c[3],10))/100,b(c[5]),this;break;case "hsl":case "hsla":if(c=/^([0-9]*\.?[0-9]+)\s*,\s*(\d+)\%\s*,\s*(\d+)\%\s*(,\s*([0-9]*\.?[0-9]+)\s*)?$/.exec(d)){var d=parseFloat(c[1])/ -360,e=parseInt(c[2],10)/100,f=parseInt(c[3],10)/100;b(c[5]);return this.setHSL(d,e,f)}}}else if(c=/^\#([A-Fa-f0-9]+)$/.exec(a)){c=c[1];d=c.length;if(3===d)return this.r=parseInt(c.charAt(0)+c.charAt(0),16)/255,this.g=parseInt(c.charAt(1)+c.charAt(1),16)/255,this.b=parseInt(c.charAt(2)+c.charAt(2),16)/255,this;if(6===d)return this.r=parseInt(c.charAt(0)+c.charAt(1),16)/255,this.g=parseInt(c.charAt(2)+c.charAt(3),16)/255,this.b=parseInt(c.charAt(4)+c.charAt(5),16)/255,this}a&&0=h?k/(e+f): -k/(2-e-f);switch(e){case b:g=(c-d)/k+(cthis.max.x||a.ythis.max.y?!1:!0},containsBox:function(a){return this.min.x<=a.min.x&&a.max.x<=this.max.x&& -this.min.y<=a.min.y&&a.max.y<=this.max.y?!0:!1},getParameter:function(a,b){return(b||new B).set((a.x-this.min.x)/(this.max.x-this.min.x),(a.y-this.min.y)/(this.max.y-this.min.y))},intersectsBox:function(a){return a.max.xthis.max.x||a.max.ythis.max.y?!1:!0},clampPoint:function(a,b){return(b||new B).copy(a).clamp(this.min,this.max)},distanceToPoint:function(){var a=new B;return function(b){return a.copy(b).clamp(this.min,this.max).sub(b).length()}}(),intersect:function(a){this.min.max(a.min); -this.max.min(a.max);return this},union:function(a){this.min.min(a.min);this.max.max(a.max);return this},translate:function(a){this.min.add(a);this.max.add(a);return this},equals:function(a){return a.min.equals(this.min)&&a.max.equals(this.max)}};U.prototype={constructor:U,isMaterial:!0,get needsUpdate(){return this._needsUpdate},set needsUpdate(a){!0===a&&this.update();this._needsUpdate=a},setValues:function(a){if(void 0!==a)for(var b in a){var c=a[b];if(void 0===c)console.warn("THREE.Material: '"+ -b+"' parameter is undefined.");else{var d=this[b];void 0===d?console.warn("THREE."+this.type+": '"+b+"' is not a property of this material."):d&&d.isColor?d.set(c):d&&d.isVector3&&c&&c.isVector3?d.copy(c):this[b]="overdraw"===b?Number(c):c}}},toJSON:function(a){function b(a){var b=[],c;for(c in a){var d=a[c];delete d.metadata;b.push(d)}return b}var c=void 0===a;c&&(a={textures:{},images:{}});var d={metadata:{version:4.4,type:"Material",generator:"Material.toJSON"}};d.uuid=this.uuid;d.type=this.type; -""!==this.name&&(d.name=this.name);this.color&&this.color.isColor&&(d.color=this.color.getHex());void 0!==this.roughness&&(d.roughness=this.roughness);void 0!==this.metalness&&(d.metalness=this.metalness);this.emissive&&this.emissive.isColor&&(d.emissive=this.emissive.getHex());this.specular&&this.specular.isColor&&(d.specular=this.specular.getHex());void 0!==this.shininess&&(d.shininess=this.shininess);this.map&&this.map.isTexture&&(d.map=this.map.toJSON(a).uuid);this.alphaMap&&this.alphaMap.isTexture&& 
-(d.alphaMap=this.alphaMap.toJSON(a).uuid);this.lightMap&&this.lightMap.isTexture&&(d.lightMap=this.lightMap.toJSON(a).uuid);this.bumpMap&&this.bumpMap.isTexture&&(d.bumpMap=this.bumpMap.toJSON(a).uuid,d.bumpScale=this.bumpScale);this.normalMap&&this.normalMap.isTexture&&(d.normalMap=this.normalMap.toJSON(a).uuid,d.normalScale=this.normalScale.toArray());this.displacementMap&&this.displacementMap.isTexture&&(d.displacementMap=this.displacementMap.toJSON(a).uuid,d.displacementScale=this.displacementScale, -d.displacementBias=this.displacementBias);this.roughnessMap&&this.roughnessMap.isTexture&&(d.roughnessMap=this.roughnessMap.toJSON(a).uuid);this.metalnessMap&&this.metalnessMap.isTexture&&(d.metalnessMap=this.metalnessMap.toJSON(a).uuid);this.emissiveMap&&this.emissiveMap.isTexture&&(d.emissiveMap=this.emissiveMap.toJSON(a).uuid);this.specularMap&&this.specularMap.isTexture&&(d.specularMap=this.specularMap.toJSON(a).uuid);this.envMap&&this.envMap.isTexture&&(d.envMap=this.envMap.toJSON(a).uuid,d.reflectivity= -this.reflectivity);void 0!==this.size&&(d.size=this.size);void 0!==this.sizeAttenuation&&(d.sizeAttenuation=this.sizeAttenuation);1!==this.blending&&(d.blending=this.blending);2!==this.shading&&(d.shading=this.shading);0!==this.side&&(d.side=this.side);0!==this.vertexColors&&(d.vertexColors=this.vertexColors);1>this.opacity&&(d.opacity=this.opacity);!0===this.transparent&&(d.transparent=this.transparent);d.depthFunc=this.depthFunc;d.depthTest=this.depthTest;d.depthWrite=this.depthWrite;0e&&(e=m);l>f&&(f=l);n>g&&(g=n)}this.min.set(b,c,d);this.max.set(e,f,g)},setFromPoints:function(a){this.makeEmpty();for(var b=0,c=a.length;bthis.max.x||a.ythis.max.y|| -a.zthis.max.z?!1:!0},containsBox:function(a){return this.min.x<=a.min.x&&a.max.x<=this.max.x&&this.min.y<=a.min.y&&a.max.y<=this.max.y&&this.min.z<=a.min.z&&a.max.z<=this.max.z?!0:!1},getParameter:function(a,b){return(b||new q).set((a.x-this.min.x)/(this.max.x-this.min.x),(a.y-this.min.y)/(this.max.y-this.min.y),(a.z-this.min.z)/(this.max.z-this.min.z))},intersectsBox:function(a){return a.max.xthis.max.x||a.max.ythis.max.y||a.max.zthis.max.z?!1:!0},intersectsSphere:function(){var a;return function(b){void 0===a&&(a=new q);this.clampPoint(b.center,a);return a.distanceToSquared(b.center)<=b.radius*b.radius}}(),intersectsPlane:function(a){var b,c;0=a.constant},clampPoint:function(a,b){return(b||new q).copy(a).clamp(this.min,this.max)},distanceToPoint:function(){var a=new q;return function(b){return a.copy(b).clamp(this.min,this.max).sub(b).length()}}(),getBoundingSphere:function(){var a=new q;return function(b){b=b||new Ca;this.getCenter(b.center);b.radius=.5*this.getSize(a).length();return b}}(),intersect:function(a){this.min.max(a.min);this.max.min(a.max); -this.isEmpty()&&this.makeEmpty();return this},union:function(a){this.min.min(a.min);this.max.max(a.max);return this},applyMatrix4:function(){var a=[new q,new q,new q,new q,new q,new q,new q,new q];return function(b){if(this.isEmpty())return this;a[0].set(this.min.x,this.min.y,this.min.z).applyMatrix4(b);a[1].set(this.min.x,this.min.y,this.max.z).applyMatrix4(b);a[2].set(this.min.x,this.max.y,this.min.z).applyMatrix4(b);a[3].set(this.min.x,this.max.y,this.max.z).applyMatrix4(b);a[4].set(this.max.x, -this.min.y,this.min.z).applyMatrix4(b);a[5].set(this.max.x,this.min.y,this.max.z).applyMatrix4(b);a[6].set(this.max.x,this.max.y,this.min.z).applyMatrix4(b);a[7].set(this.max.x,this.max.y,this.max.z).applyMatrix4(b);this.setFromPoints(a);return 
this}}(),translate:function(a){this.min.add(a);this.max.add(a);return this},equals:function(a){return a.min.equals(this.min)&&a.max.equals(this.max)}};Ca.prototype={constructor:Ca,set:function(a,b){this.center.copy(a);this.radius=b;return this},setFromPoints:function(){var a= -new Ba;return function(b,c){var d=this.center;void 0!==c?d.copy(c):a.setFromPoints(b).getCenter(d);for(var e=0,f=0,g=b.length;f=this.radius},containsPoint:function(a){return a.distanceToSquared(this.center)<=this.radius*this.radius},distanceToPoint:function(a){return a.distanceTo(this.center)- -this.radius},intersectsSphere:function(a){var b=this.radius+a.radius;return a.center.distanceToSquared(this.center)<=b*b},intersectsBox:function(a){return a.intersectsSphere(this)},intersectsPlane:function(a){return Math.abs(this.center.dot(a.normal)-a.constant)<=this.radius},clampPoint:function(a,b){var c=this.center.distanceToSquared(a),d=b||new q;d.copy(a);c>this.radius*this.radius&&(d.sub(this.center).normalize(),d.multiplyScalar(this.radius).add(this.center));return d},getBoundingBox:function(a){a= -a||new Ba;a.set(this.center,this.center);a.expandByScalar(this.radius);return a},applyMatrix4:function(a){this.center.applyMatrix4(a);this.radius*=a.getMaxScaleOnAxis();return this},translate:function(a){this.center.add(a);return this},equals:function(a){return a.center.equals(this.center)&&a.radius===this.radius}};Ia.prototype={constructor:Ia,isMatrix3:!0,set:function(a,b,c,d,e,f,g,h,k){var m=this.elements;m[0]=a;m[1]=d;m[2]=g;m[3]=b;m[4]=e;m[5]=h;m[6]=c;m[7]=f;m[8]=k;return this},identity:function(){this.set(1, -0,0,0,1,0,0,0,1);return this},clone:function(){return(new this.constructor).fromArray(this.elements)},copy:function(a){a=a.elements;this.set(a[0],a[3],a[6],a[1],a[4],a[7],a[2],a[5],a[8]);return this},setFromMatrix4:function(a){a=a.elements;this.set(a[0],a[4],a[8],a[1],a[5],a[9],a[2],a[6],a[10]);return this},applyToVector3Array:function(){var a;return function(b,c,d){void 0===a&&(a=new q);void 0===c&&(c=0);void 0===d&&(d=b.length);for(var e=0;ec;c++)this.elements[c]=a[c+b];return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);var c=this.elements;a[b]=c[0];a[b+1]=c[1];a[b+2]=c[2];a[b+3]=c[3];a[b+4]=c[4];a[b+5]=c[5];a[b+6]=c[6];a[b+7]=c[7];a[b+8]=c[8];return a}};va.prototype={constructor:va,set:function(a,b){this.normal.copy(a); -this.constant=b;return this},setComponents:function(a,b,c,d){this.normal.set(a,b,c);this.constant=d;return this},setFromNormalAndCoplanarPoint:function(a,b){this.normal.copy(a);this.constant=-b.dot(this.normal);return this},setFromCoplanarPoints:function(){var a=new q,b=new q;return function(c,d,e){d=a.subVectors(e,d).cross(b.subVectors(c,d)).normalize();this.setFromNormalAndCoplanarPoint(d,c);return this}}(),clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.normal.copy(a.normal); -this.constant=a.constant;return this},normalize:function(){var a=1/this.normal.length();this.normal.multiplyScalar(a);this.constant*=a;return this},negate:function(){this.constant*=-1;this.normal.negate();return this},distanceToPoint:function(a){return this.normal.dot(a)+this.constant},distanceToSphere:function(a){return this.distanceToPoint(a.center)-a.radius},projectPoint:function(a,b){return this.orthoPoint(a,b).sub(a).negate()},orthoPoint:function(a,b){var c=this.distanceToPoint(a);return(b|| -new q).copy(this.normal).multiplyScalar(c)},intersectLine:function(){var a=new q;return function(b,c){var d=c||new 
q,e=b.delta(a),f=this.normal.dot(e);if(0===f){if(0===this.distanceToPoint(b.start))return d.copy(b.start)}else return f=-(b.start.dot(this.normal)+this.constant)/f,0>f||1b&&0a&&0c;c++)b[c].copy(a.planes[c]);return this},setFromMatrix:function(a){var b=this.planes,c=a.elements;a=c[0];var d=c[1],e=c[2],f=c[3],g=c[4],h=c[5],k=c[6],m=c[7],l=c[8],n=c[9],p=c[10],r=c[11],q=c[12],t=c[13],D=c[14],c=c[15]; -b[0].setComponents(f-a,m-g,r-l,c-q).normalize();b[1].setComponents(f+a,m+g,r+l,c+q).normalize();b[2].setComponents(f+d,m+h,r+n,c+t).normalize();b[3].setComponents(f-d,m-h,r-n,c-t).normalize();b[4].setComponents(f-e,m-k,r-p,c-D).normalize();b[5].setComponents(f+e,m+k,r+p,c+D).normalize();return this},intersectsObject:function(){var a=new Ca;return function(b){var c=b.geometry;null===c.boundingSphere&&c.computeBoundingSphere();a.copy(c.boundingSphere).applyMatrix4(b.matrixWorld);return this.intersectsSphere(a)}}(), -intersectsSprite:function(){var a=new Ca;return function(b){a.center.set(0,0,0);a.radius=.7071067811865476;a.applyMatrix4(b.matrixWorld);return this.intersectsSphere(a)}}(),intersectsSphere:function(a){var b=this.planes,c=a.center;a=-a.radius;for(var d=0;6>d;d++)if(b[d].distanceToPoint(c)e;e++){var f=d[e];a.x=0g&&0>f)return!1}return!0}}(),containsPoint:function(a){for(var b=this.planes,c=0;6>c;c++)if(0>b[c].distanceToPoint(a))return!1;return!0}};ab.prototype={constructor:ab,set:function(a,b){this.origin.copy(a);this.direction.copy(b);return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.origin.copy(a.origin); -this.direction.copy(a.direction);return this},at:function(a,b){return(b||new q).copy(this.direction).multiplyScalar(a).add(this.origin)},lookAt:function(a){this.direction.copy(a).sub(this.origin).normalize();return this},recast:function(){var a=new q;return function(b){this.origin.copy(this.at(b,a));return this}}(),closestPointToPoint:function(a,b){var c=b||new q;c.subVectors(a,this.origin);var d=c.dot(this.direction);return 0>d?c.copy(this.origin):c.copy(this.direction).multiplyScalar(d).add(this.origin)}, -distanceToPoint:function(a){return Math.sqrt(this.distanceSqToPoint(a))},distanceSqToPoint:function(){var a=new q;return function(b){var c=a.subVectors(b,this.origin).dot(this.direction);if(0>c)return this.origin.distanceToSquared(b);a.copy(this.direction).multiplyScalar(c).add(this.origin);return a.distanceToSquared(b)}}(),distanceSqToSegment:function(){var a=new q,b=new q,c=new q;return function(d,e,f,g){a.copy(d).add(e).multiplyScalar(.5);b.copy(e).sub(d).normalize();c.copy(this.origin).sub(a); -var h=.5*d.distanceTo(e),k=-this.direction.dot(b),m=c.dot(this.direction),l=-c.dot(b),n=c.lengthSq(),p=Math.abs(1-k*k),r;0=-r?e<=r?(h=1/p,d*=h,e*=h,k=d*(d+k*e+2*m)+e*(k*d+e+2*l)+n):(e=h,d=Math.max(0,-(k*e+m)),k=-d*d+e*(e+2*l)+n):(e=-h,d=Math.max(0,-(k*e+m)),k=-d*d+e*(e+2*l)+n):e<=-r?(d=Math.max(0,-(-k*h+m)),e=0f)return null;f=Math.sqrt(f-e);e=d-f;d+=f;return 0>e&&0>d?null:0>e?this.at(d,c):this.at(e,c)}}(),intersectsSphere:function(a){return this.distanceToPoint(a.center)<= -a.radius},distanceToPlane:function(a){var b=a.normal.dot(this.direction);if(0===b)return 0===a.distanceToPoint(this.origin)?0:null;a=-(this.origin.dot(a.normal)+a.constant)/b;return 0<=a?a:null},intersectPlane:function(a,b){var c=this.distanceToPlane(a);return null===c?null:this.at(c,b)},intersectsPlane:function(a){var b=a.distanceToPoint(this.origin);return 0===b||0>a.normal.dot(this.direction)*b?!0:!1},intersectBox:function(a,b){var 
c,d,e,f,g;d=1/this.direction.x;f=1/this.direction.y;g=1/this.direction.z; -var h=this.origin;0<=d?(c=(a.min.x-h.x)*d,d*=a.max.x-h.x):(c=(a.max.x-h.x)*d,d*=a.min.x-h.x);0<=f?(e=(a.min.y-h.y)*f,f*=a.max.y-h.y):(e=(a.max.y-h.y)*f,f*=a.min.y-h.y);if(c>f||e>d)return null;if(e>c||c!==c)c=e;if(fg||e>d)return null;if(e>c||c!==c)c=e;if(gd?null:this.at(0<=c?c:d,b)},intersectsBox:function(){var a=new q;return function(b){return null!==this.intersectBox(b,a)}}(),intersectTriangle:function(){var a= -new q,b=new q,c=new q,d=new q;return function(e,f,g,h,k){b.subVectors(f,e);c.subVectors(g,e);d.crossVectors(b,c);f=this.direction.dot(d);if(0f)h=-1,f=-f;else return null;a.subVectors(this.origin,e);e=h*this.direction.dot(c.crossVectors(a,c));if(0>e)return null;g=h*this.direction.dot(b.cross(a));if(0>g||e+g>f)return null;e=-h*a.dot(d);return 0>e?null:this.at(e/f,k)}}(),applyMatrix4:function(a){this.direction.add(this.origin).applyMatrix4(a);this.origin.applyMatrix4(a); -this.direction.sub(this.origin);this.direction.normalize();return this},equals:function(a){return a.origin.equals(this.origin)&&a.direction.equals(this.direction)}};bb.RotationOrders="XYZ YZX ZXY XZY YXZ ZYX".split(" ");bb.DefaultOrder="XYZ";bb.prototype={constructor:bb,isEuler:!0,get x(){return this._x},set x(a){this._x=a;this.onChangeCallback()},get y(){return this._y},set y(a){this._y=a;this.onChangeCallback()},get z(){return this._z},set z(a){this._z=a;this.onChangeCallback()},get order(){return this._order}, -set order(a){this._order=a;this.onChangeCallback()},set:function(a,b,c,d){this._x=a;this._y=b;this._z=c;this._order=d||this._order;this.onChangeCallback();return this},clone:function(){return new this.constructor(this._x,this._y,this._z,this._order)},copy:function(a){this._x=a._x;this._y=a._y;this._z=a._z;this._order=a._order;this.onChangeCallback();return this},setFromRotationMatrix:function(a,b,c){var d=T.clamp,e=a.elements;a=e[0];var f=e[4],g=e[8],h=e[1],k=e[5],m=e[9],l=e[2],n=e[6],e=e[10];b=b|| -this._order;"XYZ"===b?(this._y=Math.asin(d(g,-1,1)),.99999>Math.abs(g)?(this._x=Math.atan2(-m,e),this._z=Math.atan2(-f,a)):(this._x=Math.atan2(n,k),this._z=0)):"YXZ"===b?(this._x=Math.asin(-d(m,-1,1)),.99999>Math.abs(m)?(this._y=Math.atan2(g,e),this._z=Math.atan2(h,k)):(this._y=Math.atan2(-l,a),this._z=0)):"ZXY"===b?(this._x=Math.asin(d(n,-1,1)),.99999>Math.abs(n)?(this._y=Math.atan2(-l,e),this._z=Math.atan2(-f,k)):(this._y=0,this._z=Math.atan2(h,a))):"ZYX"===b?(this._y=Math.asin(-d(l,-1,1)),.99999> -Math.abs(l)?(this._x=Math.atan2(n,e),this._z=Math.atan2(h,a)):(this._x=0,this._z=Math.atan2(-f,k))):"YZX"===b?(this._z=Math.asin(d(h,-1,1)),.99999>Math.abs(h)?(this._x=Math.atan2(-m,k),this._y=Math.atan2(-l,a)):(this._x=0,this._y=Math.atan2(g,e))):"XZY"===b?(this._z=Math.asin(-d(f,-1,1)),.99999>Math.abs(f)?(this._x=Math.atan2(n,k),this._y=Math.atan2(g,a)):(this._x=Math.atan2(-m,e),this._y=0)):console.warn("THREE.Euler: .setFromRotationMatrix() given unsupported order: "+b);this._order=b;if(!1!==c)this.onChangeCallback(); -return this},setFromQuaternion:function(){var a;return function(b,c,d){void 0===a&&(a=new J);a.makeRotationFromQuaternion(b);return this.setFromRotationMatrix(a,c,d)}}(),setFromVector3:function(a,b){return this.set(a.x,a.y,a.z,b||this._order)},reorder:function(){var a=new ba;return function(b){a.setFromEuler(this);return this.setFromQuaternion(a,b)}}(),equals:function(a){return a._x===this._x&&a._y===this._y&&a._z===this._z&&a._order===this._order},fromArray:function(a){this._x=a[0];this._y=a[1]; -this._z=a[2];void 
0!==a[3]&&(this._order=a[3]);this.onChangeCallback();return this},toArray:function(a,b){void 0===a&&(a=[]);void 0===b&&(b=0);a[b]=this._x;a[b+1]=this._y;a[b+2]=this._z;a[b+3]=this._order;return a},toVector3:function(a){return a?a.set(this._x,this._y,this._z):new q(this._x,this._y,this._z)},onChange:function(a){this.onChangeCallback=a;return this},onChangeCallback:function(){}};Yc.prototype={constructor:Yc,set:function(a){this.mask=1<=b.x+b.y}}();wa.prototype={constructor:wa,set:function(a,b,c){this.a.copy(a);this.b.copy(b);this.c.copy(c);return this},setFromPointsAndIndices:function(a,b,c,d){this.a.copy(a[b]);this.b.copy(a[c]);this.c.copy(a[d]);return this},clone:function(){return(new this.constructor).copy(this)},copy:function(a){this.a.copy(a.a);this.b.copy(a.b);this.c.copy(a.c); -return this},area:function(){var a=new q,b=new q;return function(){a.subVectors(this.c,this.b);b.subVectors(this.a,this.b);return.5*a.cross(b).length()}}(),midpoint:function(a){return(a||new q).addVectors(this.a,this.b).add(this.c).multiplyScalar(1/3)},normal:function(a){return wa.normal(this.a,this.b,this.c,a)},plane:function(a){return(a||new va).setFromCoplanarPoints(this.a,this.b,this.c)},barycoordFromPoint:function(a,b){return wa.barycoordFromPoint(a,this.a,this.b,this.c,b)},containsPoint:function(a){return wa.containsPoint(a, -this.a,this.b,this.c)},closestPointToPoint:function(){var a,b,c,d;return function(e,f){void 0===a&&(a=new va,b=[new gb,new gb,new gb],c=new q,d=new q);var g=f||new q,h=Infinity;a.setFromCoplanarPoints(this.a,this.b,this.c);a.projectPoint(e,c);if(!0===this.containsPoint(c))g.copy(c);else{b[0].set(this.a,this.b);b[1].set(this.b,this.c);b[2].set(this.c,this.a);for(var k=0;kd;d++)if(e[d]===e[(d+1)%3]){a.push(f);break}for(f=a.length-1;0<=f;f--)for(e=a[f],this.faces.splice(e,1),c=0,g=this.faceVertexUvs.length;cb.far?null:{distance:c,point:u.clone(),object:a}}function c(c,d,e,f,m,l,n,w){g.fromArray(f,3*l);h.fromArray(f,3*n);k.fromArray(f, -3*w);if(c=b(c,d,e,g,h,k,D))m&&(p.fromArray(m,2*l),r.fromArray(m,2*n),x.fromArray(m,2*w),c.uv=a(D,g,h,k,p,r,x)),c.face=new ea(l,n,w,wa.normal(g,h,k)),c.faceIndex=l;return c}var d=new J,e=new ab,f=new Ca,g=new q,h=new q,k=new q,m=new q,l=new q,n=new q,p=new B,r=new B,x=new B,t=new q,D=new q,u=new q;return function(q,t){var u=this.geometry,E=this.material,H=this.matrixWorld;if(void 0!==E&&(null===u.boundingSphere&&u.computeBoundingSphere(),f.copy(u.boundingSphere),f.applyMatrix4(H),!1!==q.ray.intersectsSphere(f)&& -(d.getInverse(H),e.copy(q.ray).applyMatrix4(d),null===u.boundingBox||!1!==e.intersectsBox(u.boundingBox)))){var F,M;if(u&&u.isBufferGeometry){var B,K,E=u.index,H=u.attributes,u=H.position.array;void 0!==H.uv&&(F=H.uv.array);if(null!==E)for(var H=E.array,z=0,C=H.length;zthis.scale.x*this.scale.y/4||c.push({distance:Math.sqrt(d),point:this.position, -face:null,object:this})}}(),clone:function(){return(new this.constructor(this.material)).copy(this)}});rc.prototype=Object.assign(Object.create(z.prototype),{constructor:rc,copy:function(a){z.prototype.copy.call(this,a,!1);a=a.levels;for(var b=0,c=a.length;b=d[e].distance)d[e- -1].object.visible=!1,d[e].object.visible=!0;else break;for(;ef||(l.applyMatrix4(this.matrixWorld),t=d.ray.origin.distanceTo(l),td.far||e.push({distance:t,point:h.clone().applyMatrix4(this.matrixWorld),index:g,face:null,faceIndex:null,object:this}))}else for(g=0,x=r.length/3-1;gf||(l.applyMatrix4(this.matrixWorld),t=d.ray.origin.distanceTo(l),td.far||e.push({distance:t,point:h.clone().applyMatrix4(this.matrixWorld), 
-index:g,face:null,faceIndex:null,object:this}))}else if(g&&g.isGeometry)for(k=g.vertices,m=k.length,g=0;gf||(l.applyMatrix4(this.matrixWorld),t=d.ray.origin.distanceTo(l),td.far||e.push({distance:t,point:h.clone().applyMatrix4(this.matrixWorld),index:g,face:null,faceIndex:null,object:this}))}}}(),clone:function(){return(new this.constructor(this.geometry,this.material)).copy(this)}});la.prototype=Object.assign(Object.create(Ta.prototype), -{constructor:la,isLineSegments:!0});xa.prototype=Object.create(U.prototype);xa.prototype.constructor=xa;xa.prototype.isPointsMaterial=!0;xa.prototype.copy=function(a){U.prototype.copy.call(this,a);this.color.copy(a.color);this.map=a.map;this.size=a.size;this.sizeAttenuation=a.sizeAttenuation;return this};Kb.prototype=Object.assign(Object.create(z.prototype),{constructor:Kb,isPoints:!0,raycast:function(){var a=new J,b=new ab,c=new Ca;return function(d,e){function f(a,c){var f=b.distanceSqToPoint(a); -if(fd.far||e.push({distance:m,distanceToRay:Math.sqrt(f),point:h.clone(),index:c,face:null,object:g})}}var g=this,h=this.geometry,k=this.matrixWorld,m=d.params.Points.threshold;null===h.boundingSphere&&h.computeBoundingSphere();c.copy(h.boundingSphere);c.applyMatrix4(k);if(!1!==d.ray.intersectsSphere(c)){a.getInverse(k);b.copy(d.ray).applyMatrix4(a);var m=m/((this.scale.x+this.scale.y+this.scale.z)/3), -l=m*m,m=new q;if(h&&h.isBufferGeometry){var n=h.index,h=h.attributes.position.array;if(null!==n)for(var p=n.array,n=0,r=p.length;nc)return null;var d=[],e=[],f=[],g,h,k;if(0=m--){console.warn("THREE.ShapeUtils: Unable to triangulate polygon! in triangulate()");break}g=h;c<=g&&(g=0);h=g+1;c<=h&&(h=0);k=h+1;c<=k&&(k=0);var l;a:{var n, -p,r,q,t,D,u,v;n=a[e[g]].x;p=a[e[g]].y;r=a[e[h]].x;q=a[e[h]].y;t=a[e[k]].x;D=a[e[k]].y;if(0>=(r-n)*(D-p)-(q-p)*(t-n))l=!1;else{var I,y,E,H,F,M,B,z,C,G;I=t-r;y=D-q;E=n-t;H=p-D;F=r-n;M=q-p;for(l=0;l=-Number.EPSILON&&z>=-Number.EPSILON&&B>=-Number.EPSILON)){l=!1;break a}l=!0}}if(l){d.push([a[e[g]],a[e[h]],a[e[k]]]);f.push([e[g],e[h],e[k]]);g=h;for(k=h+1;kNumber.EPSILON){if(0q||q>p)return[];k=m*l-k*n;if(0>k||k>p)return[]}else{if(0c?[]:k===c?f?[]:[g]:a<=c?[g,h]:[g,m]}function f(a,b,c,d){var e=b.x-a.x,f=b.y-a.y;b=c.x-a.x;c=c.y-a.y;var g=d.x-a.x;d=d.y-a.y;a=e*c-f*b;e=e*d-f*g;return Math.abs(a)>Number.EPSILON?(b=g*c-d*b,0e&&(e=d);var g=a+1;g>d&&(g=0);d=f(h[a],h[e],h[g],k[b]);if(!d)return!1;d=k.length-1;e=b-1;0>e&&(e=d);g=b+1;g>d&&(g=0);return(d=f(k[b],k[e],k[g],h[a]))?!0:!1}function d(a,b){var c,f;for(c=0;cN){console.log("Infinite Loop! 
Holes left:"+m.length+", Probably Hole outside Shape!");break}for(n=z;nk;k++)l=m[k].x+":"+m[k].y,l=n[l],void 0!==l&&(m[k]=l);return p.concat()},isClockWise:function(a){return 0>ra.area(a)},b2:function(){return function(a,b,c,d){var e=1-a;return e*e*b+2*(1-a)*a*c+a*a*d}}(),b3:function(){return function(a,b,c,d,e){var f=1-a,g=1-a;return f*f*f*b+3*g*g*a*c+3*(1-a)*a*a*d+a*a*a*e}}()};za.prototype=Object.create(Q.prototype);za.prototype.constructor= -za;za.prototype.addShapeList=function(a,b){for(var c=a.length,d=0;dNumber.EPSILON){var k=Math.sqrt(h),m=Math.sqrt(d*d+g*g),h=b.x-f/k;b=b.y+e/k;g=((c.x-g/m-h)*g-(c.y+d/m-b)*d)/(e*g-f*d);d=h+e*g-a.x;e=b+f*g-a.y;f= -d*d+e*e;if(2>=f)return new B(d,e);f=Math.sqrt(f/2)}else a=!1,e>Number.EPSILON?d>Number.EPSILON&&(a=!0):e<-Number.EPSILON?d<-Number.EPSILON&&(a=!0):Math.sign(f)===Math.sign(g)&&(a=!0),a?(d=-f,f=Math.sqrt(h)):(d=e,e=f,f=Math.sqrt(h/2));return new B(d/f,e/f)}function e(a,b){var c,d;for(L=a.length;0<=--L;){c=L;d=L-1;0>d&&(d=a.length-1);var e,f=r+2*l;for(e=0;eMath.abs(b.y-c.y)?[new B(b.x,1-b.z),new B(c.x,1-c.z),new B(d.x,1-d.z),new B(e.x,1-e.z)]:[new B(b.y,1-b.z),new B(c.y,1-c.z),new B(d.y,1-d.z),new B(e.y, -1-e.z)]}};Dc.prototype=Object.create(za.prototype);Dc.prototype.constructor=Dc;mb.prototype=Object.create(G.prototype);mb.prototype.constructor=mb;Vb.prototype=Object.create(Q.prototype);Vb.prototype.constructor=Vb;Wb.prototype=Object.create(G.prototype);Wb.prototype.constructor=Wb;Ec.prototype=Object.create(Q.prototype);Ec.prototype.constructor=Ec;Fc.prototype=Object.create(Q.prototype);Fc.prototype.constructor=Fc;Xb.prototype=Object.create(G.prototype);Xb.prototype.constructor=Xb;Gc.prototype=Object.create(Q.prototype); -Gc.prototype.constructor=Gc;cb.prototype=Object.create(Q.prototype);cb.prototype.constructor=cb;cb.prototype.addShapeList=function(a,b){for(var c=0,d=a.length;c=e)break a;else{f=b[1];a=e)break b}d=c;c=0}}for(;c>>1,ab;)--f;++f;if(0!==e||f!==d)e>=f&&(f=Math.max(f,1),e=f-1),d=this.getValueSize(),this.times=ma.arraySlice(c,e,f),this.values=ma.arraySlice(this.values,e*d,f*d);return this},validate:function(){var a=!0,b=this.getValueSize();0!==b-Math.floor(b)&&(console.error("invalid value size in track",this),a=!1);var c=this.times,b=this.values,d=c.length;0===d&&(console.error("track is empty", -this),a=!1);for(var e=null,f=0;f!==d;f++){var g=c[f];if("number"===typeof g&&isNaN(g)){console.error("time is not a valid number",this,f,g);a=!1;break}if(null!==e&&e>g){console.error("out of order keys",this,f,g,e);a=!1;break}e=g}if(void 0!==b&&ma.isTypedArray(b))for(f=0,c=b.length;f!==c;++f)if(d=b[f],isNaN(d)){console.error("value is not a valid number",this,f,d);a=!1;break}return a},optimize:function(){for(var a=this.times,b=this.values,c=this.getValueSize(),d=2302===this.getInterpolation(),e=1, -f=a.length-1,g=1;gk.opacity&&(k.transparent=!0);c.setTextures(h);return c.parse(k)}}()};wb.Handlers={handlers:[],add:function(a,b){this.handlers.push(a,b)},get:function(a){for(var b=this.handlers,c=0,d=b.length;cg;g++)p=v[k++],u=D[2*p],p=D[2*p+1],u=new B(u,p),2!==g&&c.faceVertexUvs[d][h].push(u),0!==g&&c.faceVertexUvs[d][h+1].push(u);n&&(n=3*v[k++],r.normal.set(z[n++],z[n++],z[n]),t.normal.copy(r.normal));if(x)for(d=0;4>d;d++)n=3*v[k++],x=new q(z[n++],z[n++],z[n]),2!==d&&r.vertexNormals.push(x),0!==d&&t.vertexNormals.push(x);w&&(w=v[k++],w=y[w],r.color.setHex(w),t.color.setHex(w));if(b)for(d= -0;4>d;d++)w=v[k++],w=y[w],2!==d&&r.vertexColors.push(new O(w)),0!==d&&t.vertexColors.push(new O(w));c.faces.push(r);c.faces.push(t)}else{r=new 
ea;r.a=v[k++];r.b=v[k++];r.c=v[k++];h&&(h=v[k++],r.materialIndex=h);h=c.faces.length;if(d)for(d=0;dg;g++)p=v[k++],u=D[2*p],p=D[2*p+1],u=new B(u,p),c.faceVertexUvs[d][h].push(u);n&&(n=3*v[k++],r.normal.set(z[n++],z[n++],z[n]));if(x)for(d=0;3>d;d++)n=3*v[k++],x=new q(z[n++],z[n++],z[n]),r.vertexNormals.push(x); -w&&(w=v[k++],r.color.setHex(y[w]));if(b)for(d=0;3>d;d++)w=v[k++],r.vertexColors.push(new O(y[w]));c.faces.push(r)}})(d);(function(){var b=void 0!==a.influencesPerVertex?a.influencesPerVertex:2;if(a.skinWeights)for(var d=0,g=a.skinWeights.length;dk)g=d+1;else if(0b&&(b=0);1Number.EPSILON&&(g.normalize(),c=Math.acos(T.clamp(d[k-1].dot(d[k]),-1,1)),e[k].applyMatrix4(h.makeRotationAxis(g,c))),f[k].crossVectors(d[k],e[k]);if(!0===b)for(c=Math.acos(T.clamp(e[0].dot(e[a]),-1,1)),c/=a,0=b)return b=c[a]-b,a=this.curves[a],c=a.getLength(),a.getPointAt(0===c?0:1-b/c);a++}return null}, -getLength:function(){var a=this.getCurveLengths();return a[a.length-1]},updateArcLengths:function(){this.needsUpdate=!0;this.cacheLengths=null;this.getLengths()},getCurveLengths:function(){if(this.cacheLengths&&this.cacheLengths.length===this.curves.length)return this.cacheLengths;for(var a=[],b=0,c=0,d=this.curves.length;cc;)c+=b;for(;c>b;)c-=b;cb.length-2?b.length-1:c+1],b=b[c>b.length-3?b.length-1:c+2],c=Xc.interpolate;return new B(c(d.x,e.x,f.x,b.x,a),c(d.y,e.y,f.y,b.y,a))};yb.prototype=Object.create(ia.prototype); -yb.prototype.constructor=yb;yb.prototype.getPoint=function(a){var b=ra.b3;return new B(b(a,this.v0.x,this.v1.x,this.v2.x,this.v3.x),b(a,this.v0.y,this.v1.y,this.v2.y,this.v3.y))};yb.prototype.getTangent=function(a){var b=Xc.tangentCubicBezier;return(new B(b(a,this.v0.x,this.v1.x,this.v2.x,this.v3.x),b(a,this.v0.y,this.v1.y,this.v2.y,this.v3.y))).normalize()};zb.prototype=Object.create(ia.prototype);zb.prototype.constructor=zb;zb.prototype.getPoint=function(a){var b=ra.b2;return new B(b(a,this.v0.x, -this.v1.x,this.v2.x),b(a,this.v0.y,this.v1.y,this.v2.y))};zb.prototype.getTangent=function(a){var b=Xc.tangentQuadraticBezier;return(new B(b(a,this.v0.x,this.v1.x,this.v2.x),b(a,this.v0.y,this.v1.y,this.v2.y))).normalize()};var de=Object.assign(Object.create(Oc.prototype),{fromPoints:function(a){this.moveTo(a[0].x,a[0].y);for(var b=1,c=a.length;bNumber.EPSILON){if(0>l&&(g=b[f],k=-k,h=b[e],l=-l),!(a.yh.y))if(a.y=== -g.y){if(a.x===g.x)return!0}else{e=l*(a.x-g.x)-k*(a.y-g.y);if(0===e)return!0;0>e||(d=!d)}}else if(a.y===g.y&&(h.x<=a.x&&a.x<=g.x||g.x<=a.x&&a.x<=h.x))return!0}return d}var e=ra.isClockWise,f=this.subPaths;if(0===f.length)return[];if(!0===b)return c(f);var g,h,k,l=[];if(1===f.length)return h=f[0],k=new Ab,k.curves=h.curves,l.push(k),l;var q=!e(f[0].getPoints()),q=a?!q:q;k=[];var n=[],p=[],r=0,x;n[r]=void 0;p[r]=[];for(var t=0,D=f.length;td&&this._mixBufferRegion(c,a,3*b,1-d,b);for(var d=b,f=b+b;d!==f;++d)if(c[d]!==c[d+b]){e.setValue(c,a);break}},saveOriginalState:function(){var a=this.buffer,b=this.valueSize,c=3*b;this.binding.getValue(a,c);for(var d= -b;d!==c;++d)a[d]=a[c+d%b];this.cumulativeWeight=0},restoreOriginalState:function(){this.binding.setValue(this.buffer,3*this.valueSize)},_select:function(a,b,c,d,e){if(.5<=d)for(d=0;d!==e;++d)a[b+d]=a[c+d]},_slerp:function(a,b,c,d,e){ba.slerpFlat(a,b,a,b,a,c,d)},_lerp:function(a,b,c,d,e){for(var f=1-d,g=0;g!==e;++g){var h=b+g;a[h]=a[h]*f+a[c+g]*d}}};fa.prototype={constructor:fa,getValue:function(a,b){this.bind();this.getValue(a,b)},setValue:function(a,b){this.bind();this.setValue(a,b)},bind:function(){var a= 
-this.node,b=this.parsedPath,c=b.objectName,d=b.propertyName,e=b.propertyIndex;a||(this.node=a=fa.findNode(this.rootNode,b.nodeName)||this.rootNode);this.getValue=this._getValue_unavailable;this.setValue=this._setValue_unavailable;if(a){if(c){var f=b.objectIndex;switch(c){case "materials":if(!a.material){console.error(" can not bind to material as node does not have a material",this);return}if(!a.material.materials){console.error(" can not bind to material.materials as node.material does not have a materials array", -this);return}a=a.material.materials;break;case "bones":if(!a.skeleton){console.error(" can not bind to bones as node does not have a skeleton",this);return}a=a.skeleton.bones;for(c=0;c=c){var n=c++,p=b[n];d[p.uuid]=q;b[q]=p;d[l]=n;b[n]=k;k=0;for(l=f;k!==l;++k){var p=e[k],r=p[q];p[q]=p[n];p[n]=r}}}this.nCachedObjects_=c},uncache:function(a){for(var b=this._objects,c=b.length,d=this.nCachedObjects_,e=this._indicesByUUID,f=this._bindings,g=f.length,h=0,k=arguments.length;h!==k;++h){var l=arguments[h].uuid,q=e[l];if(void 0!== -q)if(delete e[l],qb||0===c)return;this._startTime=null;b*=c}b*=this._updateTimeScale(a);c=this._updateTime(b);a=this._updateWeight(a);if(0c.parameterPositions[1]&& -(this.stopFading(),0===d&&(this.enabled=!1))}}return this._effectiveWeight=b},_updateTimeScale:function(a){var b=0;if(!this.paused){var b=this.timeScale,c=this._timeScaleInterpolant;if(null!==c){var d=c.evaluate(a)[0],b=b*d;a>c.parameterPositions[1]&&(this.stopWarping(),0===b?this.paused=!0:this.timeScale=b)}}return this._effectiveTimeScale=b},_updateTime:function(a){var b=this.time+a;if(0===a)return b;var c=this._clip.duration,d=this.loop,e=this._loopCount;if(2200===d)a:{if(-1===e&&(this.loopCount= -0,this._setEndings(!0,!0,!1)),b>=c)b=c;else if(0>b)b=0;else break a;this.clampWhenFinished?this.paused=!0:this.enabled=!1;this._mixer.dispatchEvent({type:"finished",action:this,direction:0>a?-1:1})}else{d=2202===d;-1===e&&(0<=a?(e=0,this._setEndings(!0,0===this.repetitions,d)):this._setEndings(0===this.repetitions,!0,d));if(b>=c||0>b){var f=Math.floor(b/c),b=b-c*f,e=e+Math.abs(f),g=this.repetitions-e;0>g?(this.clampWhenFinished?this.paused=!0:this.enabled=!1,b=0a,this._setEndings(a,!a,d)):this._setEndings(!1,!1,d),this._loopCount=e,this._mixer.dispatchEvent({type:"loop",action:this,loopDelta:f}))}if(d&&1===(e&1))return this.time=b,c-b}return this.time=b},_setEndings:function(a,b,c){var d=this._interpolantSettings;c?(d.endingStart=2401,d.endingEnd=2401):(d.endingStart=a?this.zeroSlopeAtStart?2401:2400:2402,d.endingEnd=b?this.zeroSlopeAtEnd?2401:2400:2402)},_scheduleFading:function(a,b,c){var d=this._mixer,e=d.time, -f=this._weightInterpolant;null===f&&(this._weightInterpolant=f=d._lendControlInterpolant());d=f.parameterPositions;f=f.sampleValues;d[0]=e;f[0]=b;d[1]=e+a;f[1]=c;return this}};Object.assign(Ud.prototype,sa.prototype,{clipAction:function(a,b){var c=b||this._root,d=c.uuid,e="string"===typeof a?Ha.findByName(c,a):a,c=null!==e?e.uuid:a,f=this._actionsByClip[c],g=null;if(void 0!==f){g=f.actionByRoot[d];if(void 0!==g)return g;g=f.knownActions[0];null===e&&(e=g._clip)}if(null===e)return null;e=new Td(this, -e,b);this._bindAction(e,g);this._addInactiveAction(e,c,d);return e},existingAction:function(a,b){var c=b||this._root,d=c.uuid,c="string"===typeof a?Ha.findByName(c,a):a,c=this._actionsByClip[c?c.uuid:a];return void 0!==c?c.actionByRoot[d]||null:null},stopAllAction:function(){for(var 
a=this._actions,b=this._nActiveActions,c=this._bindings,d=this._nActiveBindings,e=this._nActiveBindings=this._nActiveActions=0;e!==b;++e)a[e].reset();for(e=0;e!==d;++e)c[e].useCount=0;return this},update:function(a){a*= -this.timeScale;for(var b=this._actions,c=this._nActiveActions,d=this.time+=a,e=Math.sign(a),f=this._accuIndex^=1,g=0;g!==c;++g){var h=b[g];h.enabled&&h._update(d,a,e,f)}a=this._bindings;b=this._nActiveBindings;for(g=0;g!==b;++g)a[g].apply(f);return this},getRoot:function(){return this._root},uncacheClip:function(a){var b=this._actions;a=a.uuid;var c=this._actionsByClip,d=c[a];if(void 0!==d){for(var d=d.knownActions,e=0,f=d.length;e!==f;++e){var g=d[e];this._deactivateAction(g);var h=g._cacheIndex, -k=b[b.length-1];g._cacheIndex=null;g._byClipCacheIndex=null;k._cacheIndex=h;b[h]=k;b.pop();this._removeInactiveBindingsForAction(g)}delete c[a]}},uncacheRoot:function(a){a=a.uuid;var b=this._actionsByClip,c;for(c in b){var d=b[c].actionByRoot[a];void 0!==d&&(this._deactivateAction(d),this._removeInactiveAction(d))}c=this._bindingsByRootAndName[a];if(void 0!==c)for(var e in c)a=c[e],a.restoreOriginalState(),this._removeInactiveBinding(a)},uncacheAction:function(a,b){var c=this.existingAction(a,b); -null!==c&&(this._deactivateAction(c),this._removeInactiveAction(c))}});Object.assign(Ud.prototype,{_bindAction:function(a,b){var c=a._localRoot||this._root,d=a._clip.tracks,e=d.length,f=a._propertyBindings,g=a._interpolants,h=c.uuid,k=this._bindingsByRootAndName,l=k[h];void 0===l&&(l={},k[h]=l);for(k=0;k!==e;++k){var q=d[k],n=q.name,p=l[n];if(void 0===p){p=f[k];if(void 0!==p){null===p._cacheIndex&&(++p.referenceCount,this._addInactiveBinding(p,h,n));continue}p=new wd(fa.create(c,n,b&&b._propertyBindings[k].binding.parsedPath), -q.ValueTypeName,q.getValueSize());++p.referenceCount;this._addInactiveBinding(p,h,n)}f[k]=p;g[k].resultBuffer=p.buffer}},_activateAction:function(a){if(!this._isActiveAction(a)){if(null===a._cacheIndex){var b=(a._localRoot||this._root).uuid,c=a._clip.uuid,d=this._actionsByClip[c];this._bindAction(a,d&&d.knownActions[0]);this._addInactiveAction(a,c,b)}b=a._propertyBindings;c=0;for(d=b.length;c!==d;++c){var e=b[c];0===e.useCount++&&(this._lendBinding(e),e.saveOriginalState())}this._lendAction(a)}}, -_deactivateAction:function(a){if(this._isActiveAction(a)){for(var b=a._propertyBindings,c=0,d=b.length;c!==d;++c){var e=b[c];0===--e.useCount&&(e.restoreOriginalState(),this._takeBackBinding(e))}this._takeBackAction(a)}},_initMemoryManager:function(){this._actions=[];this._nActiveActions=0;this._actionsByClip={};this._bindings=[];this._nActiveBindings=0;this._bindingsByRootAndName={};this._controlInterpolants=[];this._nActiveControlInterpolants=0;var a=this;this.stats={actions:{get total(){return a._actions.length}, -get inUse(){return a._nActiveActions}},bindings:{get total(){return a._bindings.length},get inUse(){return a._nActiveBindings}},controlInterpolants:{get total(){return a._controlInterpolants.length},get inUse(){return a._nActiveControlInterpolants}}}},_isActiveAction:function(a){a=a._cacheIndex;return null!==a&&ah.end&&(h.end=f);c||(c=k)}}for(k in d)h=d[k],this.createAnimation(k,h.start,h.end,a);this.firstAnimation=c};na.prototype.setAnimationDirectionForward=function(a){if(a=this.animationsMap[a])a.direction=1,a.directionBackwards=!1};na.prototype.setAnimationDirectionBackward=function(a){if(a=this.animationsMap[a])a.direction=-1,a.directionBackwards=!0};na.prototype.setAnimationFPS=function(a,b){var c= 
-this.animationsMap[a];c&&(c.fps=b,c.duration=(c.end-c.start)/c.fps)};na.prototype.setAnimationDuration=function(a,b){var c=this.animationsMap[a];c&&(c.duration=b,c.fps=(c.end-c.start)/c.duration)};na.prototype.setAnimationWeight=function(a,b){var c=this.animationsMap[a];c&&(c.weight=b)};na.prototype.setAnimationTime=function(a,b){var c=this.animationsMap[a];c&&(c.time=b)};na.prototype.getAnimationTime=function(a){var b=0;if(a=this.animationsMap[a])b=a.time;return b};na.prototype.getAnimationDuration= -function(a){var b=-1;if(a=this.animationsMap[a])b=a.duration;return b};na.prototype.playAnimation=function(a){var b=this.animationsMap[a];b?(b.time=0,b.active=!0):console.warn("THREE.MorphBlendMesh: animation["+a+"] undefined in .playAnimation()")};na.prototype.stopAnimation=function(a){if(a=this.animationsMap[a])a.active=!1};na.prototype.update=function(a){for(var b=0,c=this.animationsList.length;b -d.duration||0>d.time)d.direction*=-1,d.time>d.duration&&(d.time=d.duration,d.directionBackwards=!0),0>d.time&&(d.time=0,d.directionBackwards=!1)}else d.time%=d.duration,0>d.time&&(d.time+=d.duration);var f=d.start+T.clamp(Math.floor(d.time/e),0,d.length-1),g=d.weight;f!==d.currentFrame&&(this.morphTargetInfluences[d.lastFrame]=0,this.morphTargetInfluences[d.currentFrame]=1*g,this.morphTargetInfluences[f]=0,d.lastFrame=d.currentFrame,d.currentFrame=f);e=d.time%e/e;d.directionBackwards&&(e=1-e);d.currentFrame!== -d.lastFrame?(this.morphTargetInfluences[d.currentFrame]=e*g,this.morphTargetInfluences[d.lastFrame]=(1-e)*g):this.morphTargetInfluences[d.currentFrame]=g}}};Qc.prototype=Object.create(z.prototype);Qc.prototype.constructor=Qc;Qc.prototype.isImmediateRenderObject=!0;Rc.prototype=Object.create(la.prototype);Rc.prototype.constructor=Rc;Rc.prototype.update=function(){var a=new q,b=new q,c=new Ia;return function(){var d=["a","b","c"];this.object.updateMatrixWorld(!0);c.getNormalMatrix(this.object.matrixWorld); -var e=this.object.matrixWorld,f=this.geometry.attributes.position,g=this.object.geometry;if(g&&g.isGeometry)for(var h=g.vertices,k=g.faces,l=g=0,q=k.length;lc.y?this.quaternion.set(1,0,0,0):(a.set(c.z,0,-c.x).normalize(),b=Math.acos(c.y),this.quaternion.setFromAxisAngle(a,b))}}();Cb.prototype.setLength=function(a,b,c){void 0===b&&(b=.2*a);void 0===c&&(c=.2*b);this.line.scale.set(1,Math.max(0,a-b),1);this.line.updateMatrix(); -this.cone.scale.set(c,b,c);this.cone.position.y=a;this.cone.updateMatrix()};Cb.prototype.setColor=function(a){this.line.material.color.copy(a);this.cone.material.color.copy(a)};xd.prototype=Object.create(la.prototype);xd.prototype.constructor=xd;var $d=function(){function a(){}var b=new q,c=new a,d=new a,e=new a;a.prototype.init=function(a,b,c,d){this.c0=a;this.c1=c;this.c2=-3*a+3*b-2*c-d;this.c3=2*a-2*b+c+d};a.prototype.initNonuniformCatmullRom=function(a,b,c,d,e,l,n){this.init(b,c,((b-a)/e-(c-a)/ -(e+l)+(c-b)/l)*l,((c-b)/l-(d-b)/(l+n)+(d-c)/n)*l)};a.prototype.initCatmullRom=function(a,b,c,d,e){this.init(b,c,e*(c-a),e*(d-b))};a.prototype.calc=function(a){var b=a*a;return this.c0+this.c1*a+this.c2*b+this.c3*b*a};return ia.create(function(a){this.points=a||[];this.closed=!1},function(a){var g=this.points,h,k;k=g.length;2>k&&console.log("duh, you need at least 2 points");a*=k-(this.closed?0:1);h=Math.floor(a);a-=h;this.closed?h+=0h&&(h=1);1E-4>k&&(k=h);1E-4>p&&(p=h);c.initNonuniformCatmullRom(l.x,w.x,n.x,g.x,k, -h,p);d.initNonuniformCatmullRom(l.y,w.y,n.y,g.y,k,h,p);e.initNonuniformCatmullRom(l.z,w.z,n.z,g.z,k,h,p)}else"catmullrom"===this.type&&(k=void 
0!==this.tension?this.tension:.5,c.initCatmullRom(l.x,w.x,n.x,g.x,k),d.initCatmullRom(l.y,w.y,n.y,g.y,k),e.initCatmullRom(l.z,w.z,n.z,g.z,k));return new q(c.calc(a),d.calc(a),e.calc(a))})}();Ee.prototype=Object.create($d.prototype);var Ef=ia.create(function(a){console.warn("THREE.SplineCurve3 will be deprecated. Please use THREE.CatmullRomCurve3");this.points= -void 0===a?[]:a},function(a){var b=this.points;a*=b.length-1;var c=Math.floor(a);a-=c;var d=b[0==c?c:c-1],e=b[c],f=b[c>b.length-2?b.length-1:c+1],b=b[c>b.length-3?b.length-1:c+2],c=Xc.interpolate;return new q(c(d.x,e.x,f.x,b.x,a),c(d.y,e.y,f.y,b.y,a),c(d.z,e.z,f.z,b.z,a))}),Ff=ia.create(function(a,b,c,d){this.v0=a;this.v1=b;this.v2=c;this.v3=d},function(a){var b=ra.b3;return new q(b(a,this.v0.x,this.v1.x,this.v2.x,this.v3.x),b(a,this.v0.y,this.v1.y,this.v2.y,this.v3.y),b(a,this.v0.z,this.v1.z,this.v2.z, -this.v3.z))}),Gf=ia.create(function(a,b,c){this.v0=a;this.v1=b;this.v2=c},function(a){var b=ra.b2;return new q(b(a,this.v0.x,this.v1.x,this.v2.x),b(a,this.v0.y,this.v1.y,this.v2.y),b(a,this.v0.z,this.v1.z,this.v2.z))}),Hf=ia.create(function(a,b){this.v1=a;this.v2=b},function(a){if(1===a)return this.v2.clone();var b=new q;b.subVectors(this.v2,this.v1);b.multiplyScalar(a);b.add(this.v1);return b});yd.prototype=Object.create(Va.prototype);yd.prototype.constructor=yd;Object.assign(mc.prototype,{center:function(a){console.warn("THREE.Box2: .center() has been renamed to .getCenter()."); -return this.getCenter(a)},empty:function(){console.warn("THREE.Box2: .empty() has been renamed to .isEmpty().");return this.isEmpty()},isIntersectionBox:function(a){console.warn("THREE.Box2: .isIntersectionBox() has been renamed to .intersectsBox().");return this.intersectsBox(a)},size:function(a){console.warn("THREE.Box2: .size() has been renamed to .getSize().");return this.getSize(a)}});Object.assign(Ba.prototype,{center:function(a){console.warn("THREE.Box3: .center() has been renamed to .getCenter()."); -return this.getCenter(a)},empty:function(){console.warn("THREE.Box3: .empty() has been renamed to .isEmpty().");return this.isEmpty()},isIntersectionBox:function(a){console.warn("THREE.Box3: .isIntersectionBox() has been renamed to .intersectsBox().");return this.intersectsBox(a)},isIntersectionSphere:function(a){console.warn("THREE.Box3: .isIntersectionSphere() has been renamed to .intersectsSphere().");return this.intersectsSphere(a)},size:function(a){console.warn("THREE.Box3: .size() has been renamed to .getSize()."); -return this.getSize(a)}});Object.assign(gb.prototype,{center:function(a){console.warn("THREE.Line3: .center() has been renamed to .getCenter().");return this.getCenter(a)}});Object.assign(Ia.prototype,{multiplyVector3:function(a){console.warn("THREE.Matrix3: .multiplyVector3() has been removed. Use vector.applyMatrix3( matrix ) instead.");return a.applyMatrix3(this)},multiplyVector3Array:function(a){console.warn("THREE.Matrix3: .multiplyVector3Array() has been renamed. Use matrix.applyToVector3Array( array ) instead."); -return this.applyToVector3Array(a)}});Object.assign(J.prototype,{extractPosition:function(a){console.warn("THREE.Matrix4: .extractPosition() has been renamed to .copyPosition().");return this.copyPosition(a)},setRotationFromQuaternion:function(a){console.warn("THREE.Matrix4: .setRotationFromQuaternion() has been renamed to .makeRotationFromQuaternion().");return this.makeRotationFromQuaternion(a)},multiplyVector3:function(a){console.warn("THREE.Matrix4: .multiplyVector3() has been removed. 
Use vector.applyMatrix4( matrix ) or vector.applyProjection( matrix ) instead."); -return a.applyProjection(this)},multiplyVector4:function(a){console.warn("THREE.Matrix4: .multiplyVector4() has been removed. Use vector.applyMatrix4( matrix ) instead.");return a.applyMatrix4(this)},multiplyVector3Array:function(a){console.warn("THREE.Matrix4: .multiplyVector3Array() has been renamed. Use matrix.applyToVector3Array( array ) instead.");return this.applyToVector3Array(a)},rotateAxis:function(a){console.warn("THREE.Matrix4: .rotateAxis() has been removed. Use Vector3.transformDirection( matrix ) instead."); -a.transformDirection(this)},crossVector:function(a){console.warn("THREE.Matrix4: .crossVector() has been removed. Use vector.applyMatrix4( matrix ) instead.");return a.applyMatrix4(this)},translate:function(a){console.error("THREE.Matrix4: .translate() has been removed.")},rotateX:function(a){console.error("THREE.Matrix4: .rotateX() has been removed.")},rotateY:function(a){console.error("THREE.Matrix4: .rotateY() has been removed.")},rotateZ:function(a){console.error("THREE.Matrix4: .rotateZ() has been removed.")}, -rotateByAxis:function(a,b){console.error("THREE.Matrix4: .rotateByAxis() has been removed.")}});Object.assign(va.prototype,{isIntersectionLine:function(a){console.warn("THREE.Plane: .isIntersectionLine() has been renamed to .intersectsLine().");return this.intersectsLine(a)}});Object.assign(ba.prototype,{multiplyVector3:function(a){console.warn("THREE.Quaternion: .multiplyVector3() has been removed. Use is now vector.applyQuaternion( quaternion ) instead.");return a.applyQuaternion(this)}});Object.assign(ab.prototype, -{isIntersectionBox:function(a){console.warn("THREE.Ray: .isIntersectionBox() has been renamed to .intersectsBox().");return this.intersectsBox(a)},isIntersectionPlane:function(a){console.warn("THREE.Ray: .isIntersectionPlane() has been renamed to .intersectsPlane().");return this.intersectsPlane(a)},isIntersectionSphere:function(a){console.warn("THREE.Ray: .isIntersectionSphere() has been renamed to .intersectsSphere().");return this.intersectsSphere(a)}});Object.assign(Ab.prototype,{extrude:function(a){console.warn("THREE.Shape: .extrude() has been removed. Use ExtrudeGeometry() instead."); -return new za(this,a)},makeGeometry:function(a){console.warn("THREE.Shape: .makeGeometry() has been removed. Use ShapeGeometry() instead.");return new cb(this,a)}});Object.assign(q.prototype,{setEulerFromRotationMatrix:function(){console.error("THREE.Vector3: .setEulerFromRotationMatrix() has been removed. Use Euler.setFromRotationMatrix() instead.")},setEulerFromQuaternion:function(){console.error("THREE.Vector3: .setEulerFromQuaternion() has been removed. Use Euler.setFromQuaternion() instead.")}, -getPositionFromMatrix:function(a){console.warn("THREE.Vector3: .getPositionFromMatrix() has been renamed to .setFromMatrixPosition().");return this.setFromMatrixPosition(a)},getScaleFromMatrix:function(a){console.warn("THREE.Vector3: .getScaleFromMatrix() has been renamed to .setFromMatrixScale().");return this.setFromMatrixScale(a)},getColumnFromMatrix:function(a,b){console.warn("THREE.Vector3: .getColumnFromMatrix() has been renamed to .setFromMatrixColumn().");return this.setFromMatrixColumn(b, -a)}});Object.assign(z.prototype,{getChildByName:function(a){console.warn("THREE.Object3D: .getChildByName() has been renamed to .getObjectByName().");return this.getObjectByName(a)},renderDepth:function(a){console.warn("THREE.Object3D: .renderDepth has been removed. 
Use .renderOrder, instead.")},translate:function(a,b){console.warn("THREE.Object3D: .translate() has been removed. Use .translateOnAxis( axis, distance ) instead.");return this.translateOnAxis(b,a)}});Object.defineProperties(z.prototype, -{eulerOrder:{get:function(){console.warn("THREE.Object3D: .eulerOrder is now .rotation.order.");return this.rotation.order},set:function(a){console.warn("THREE.Object3D: .eulerOrder is now .rotation.order.");this.rotation.order=a}},useQuaternion:{get:function(){console.warn("THREE.Object3D: .useQuaternion has been removed. The library now uses quaternions by default.")},set:function(a){console.warn("THREE.Object3D: .useQuaternion has been removed. The library now uses quaternions by default.")}}}); -Object.defineProperties(rc.prototype,{objects:{get:function(){console.warn("THREE.LOD: .objects has been renamed to .levels.");return this.levels}}});Ea.prototype.setLens=function(a,b){console.warn("THREE.PerspectiveCamera.setLens is deprecated. Use .setFocalLength and .filmGauge for a photographic setup.");void 0!==b&&(this.filmGauge=b);this.setFocalLength(a)};Object.defineProperties(pa.prototype,{onlyShadow:{set:function(a){console.warn("THREE.Light: .onlyShadow has been removed.")}},shadowCameraFov:{set:function(a){console.warn("THREE.Light: .shadowCameraFov is now .shadow.camera.fov."); -this.shadow.camera.fov=a}},shadowCameraLeft:{set:function(a){console.warn("THREE.Light: .shadowCameraLeft is now .shadow.camera.left.");this.shadow.camera.left=a}},shadowCameraRight:{set:function(a){console.warn("THREE.Light: .shadowCameraRight is now .shadow.camera.right.");this.shadow.camera.right=a}},shadowCameraTop:{set:function(a){console.warn("THREE.Light: .shadowCameraTop is now .shadow.camera.top.");this.shadow.camera.top=a}},shadowCameraBottom:{set:function(a){console.warn("THREE.Light: .shadowCameraBottom is now .shadow.camera.bottom."); -this.shadow.camera.bottom=a}},shadowCameraNear:{set:function(a){console.warn("THREE.Light: .shadowCameraNear is now .shadow.camera.near.");this.shadow.camera.near=a}},shadowCameraFar:{set:function(a){console.warn("THREE.Light: .shadowCameraFar is now .shadow.camera.far.");this.shadow.camera.far=a}},shadowCameraVisible:{set:function(a){console.warn("THREE.Light: .shadowCameraVisible has been removed. Use new THREE.CameraHelper( light.shadow.camera ) instead.")}},shadowBias:{set:function(a){console.warn("THREE.Light: .shadowBias is now .shadow.bias."); -this.shadow.bias=a}},shadowDarkness:{set:function(a){console.warn("THREE.Light: .shadowDarkness has been removed.")}},shadowMapWidth:{set:function(a){console.warn("THREE.Light: .shadowMapWidth is now .shadow.mapSize.width.");this.shadow.mapSize.width=a}},shadowMapHeight:{set:function(a){console.warn("THREE.Light: .shadowMapHeight is now .shadow.mapSize.height.");this.shadow.mapSize.height=a}}});Object.defineProperties(C.prototype,{length:{get:function(){console.warn("THREE.BufferAttribute: .length has been deprecated. 
Please use .count."); -return this.array.length}}});Object.assign(G.prototype,{addIndex:function(a){console.warn("THREE.BufferGeometry: .addIndex() has been renamed to .setIndex().");this.setIndex(a)},addDrawCall:function(a,b,c){void 0!==c&&console.warn("THREE.BufferGeometry: .addDrawCall() no longer supports indexOffset.");console.warn("THREE.BufferGeometry: .addDrawCall() is now .addGroup().");this.addGroup(a,b)},clearDrawCalls:function(){console.warn("THREE.BufferGeometry: .clearDrawCalls() is now .clearGroups()."); -this.clearGroups()},computeTangents:function(){console.warn("THREE.BufferGeometry: .computeTangents() has been removed.")},computeOffsets:function(){console.warn("THREE.BufferGeometry: .computeOffsets() has been removed.")}});Object.defineProperties(G.prototype,{drawcalls:{get:function(){console.error("THREE.BufferGeometry: .drawcalls has been renamed to .groups.");return this.groups}},offsets:{get:function(){console.warn("THREE.BufferGeometry: .offsets has been renamed to .groups.");return this.groups}}}); -Object.defineProperties(U.prototype,{wrapAround:{get:function(){console.warn("THREE."+this.type+": .wrapAround has been removed.")},set:function(a){console.warn("THREE."+this.type+": .wrapAround has been removed.")}},wrapRGB:{get:function(){console.warn("THREE."+this.type+": .wrapRGB has been removed.");return new O}}});Object.defineProperties(db.prototype,{metal:{get:function(){console.warn("THREE.MeshPhongMaterial: .metal has been removed. Use THREE.MeshStandardMaterial instead.");return!1},set:function(a){console.warn("THREE.MeshPhongMaterial: .metal has been removed. Use THREE.MeshStandardMaterial instead")}}}); -Object.defineProperties(Fa.prototype,{derivatives:{get:function(){console.warn("THREE.ShaderMaterial: .derivatives has been moved to .extensions.derivatives.");return this.extensions.derivatives},set:function(a){console.warn("THREE. ShaderMaterial: .derivatives has been moved to .extensions.derivatives.");this.extensions.derivatives=a}}});sa.prototype=Object.assign(Object.create({constructor:sa,apply:function(a){console.warn("THREE.EventDispatcher: .apply is deprecated, just inherit or Object.assign the prototype to mix-in."); -Object.assign(a,this)}}),sa.prototype);Object.defineProperties(Ae.prototype,{dynamic:{set:function(a){console.warn("THREE.Uniform: .dynamic has been removed. Use object.onBeforeRender() instead.")}},onUpdate:{value:function(){console.warn("THREE.Uniform: .onUpdate() has been removed. 
Use object.onBeforeRender() instead.");return this}}});Object.assign(Dd.prototype,{supportsFloatTextures:function(){console.warn("THREE.WebGLRenderer: .supportsFloatTextures() is now .extensions.get( 'OES_texture_float' )."); -return this.extensions.get("OES_texture_float")},supportsHalfFloatTextures:function(){console.warn("THREE.WebGLRenderer: .supportsHalfFloatTextures() is now .extensions.get( 'OES_texture_half_float' ).");return this.extensions.get("OES_texture_half_float")},supportsStandardDerivatives:function(){console.warn("THREE.WebGLRenderer: .supportsStandardDerivatives() is now .extensions.get( 'OES_standard_derivatives' ).");return this.extensions.get("OES_standard_derivatives")},supportsCompressedTextureS3TC:function(){console.warn("THREE.WebGLRenderer: .supportsCompressedTextureS3TC() is now .extensions.get( 'WEBGL_compressed_texture_s3tc' )."); -return this.extensions.get("WEBGL_compressed_texture_s3tc")},supportsCompressedTexturePVRTC:function(){console.warn("THREE.WebGLRenderer: .supportsCompressedTexturePVRTC() is now .extensions.get( 'WEBGL_compressed_texture_pvrtc' ).");return this.extensions.get("WEBGL_compressed_texture_pvrtc")},supportsBlendMinMax:function(){console.warn("THREE.WebGLRenderer: .supportsBlendMinMax() is now .extensions.get( 'EXT_blend_minmax' ).");return this.extensions.get("EXT_blend_minmax")},supportsVertexTextures:function(){return this.capabilities.vertexTextures}, -supportsInstancedArrays:function(){console.warn("THREE.WebGLRenderer: .supportsInstancedArrays() is now .extensions.get( 'ANGLE_instanced_arrays' ).");return this.extensions.get("ANGLE_instanced_arrays")},enableScissorTest:function(a){console.warn("THREE.WebGLRenderer: .enableScissorTest() is now .setScissorTest().");this.setScissorTest(a)},initMaterial:function(){console.warn("THREE.WebGLRenderer: .initMaterial() has been removed.")},addPrePlugin:function(){console.warn("THREE.WebGLRenderer: .addPrePlugin() has been removed.")}, -addPostPlugin:function(){console.warn("THREE.WebGLRenderer: .addPostPlugin() has been removed.")},updateShadowMap:function(){console.warn("THREE.WebGLRenderer: .updateShadowMap() has been removed.")}});Object.defineProperties(Dd.prototype,{shadowMapEnabled:{get:function(){return this.shadowMap.enabled},set:function(a){console.warn("THREE.WebGLRenderer: .shadowMapEnabled is now .shadowMap.enabled.");this.shadowMap.enabled=a}},shadowMapType:{get:function(){return this.shadowMap.type},set:function(a){console.warn("THREE.WebGLRenderer: .shadowMapType is now .shadowMap.type."); -this.shadowMap.type=a}},shadowMapCullFace:{get:function(){return this.shadowMap.cullFace},set:function(a){console.warn("THREE.WebGLRenderer: .shadowMapCullFace is now .shadowMap.cullFace.");this.shadowMap.cullFace=a}}});Object.defineProperties(pe.prototype,{cullFace:{get:function(){return this.renderReverseSided?2:1},set:function(a){a=1!==a;console.warn("WebGLRenderer: .shadowMap.cullFace is deprecated. 
Set .shadowMap.renderReverseSided to "+a+".");this.renderReverseSided=a}}});Object.defineProperties(Db.prototype, -{wrapS:{get:function(){console.warn("THREE.WebGLRenderTarget: .wrapS is now .texture.wrapS.");return this.texture.wrapS},set:function(a){console.warn("THREE.WebGLRenderTarget: .wrapS is now .texture.wrapS.");this.texture.wrapS=a}},wrapT:{get:function(){console.warn("THREE.WebGLRenderTarget: .wrapT is now .texture.wrapT.");return this.texture.wrapT},set:function(a){console.warn("THREE.WebGLRenderTarget: .wrapT is now .texture.wrapT.");this.texture.wrapT=a}},magFilter:{get:function(){console.warn("THREE.WebGLRenderTarget: .magFilter is now .texture.magFilter."); -return this.texture.magFilter},set:function(a){console.warn("THREE.WebGLRenderTarget: .magFilter is now .texture.magFilter.");this.texture.magFilter=a}},minFilter:{get:function(){console.warn("THREE.WebGLRenderTarget: .minFilter is now .texture.minFilter.");return this.texture.minFilter},set:function(a){console.warn("THREE.WebGLRenderTarget: .minFilter is now .texture.minFilter.");this.texture.minFilter=a}},anisotropy:{get:function(){console.warn("THREE.WebGLRenderTarget: .anisotropy is now .texture.anisotropy."); -return this.texture.anisotropy},set:function(a){console.warn("THREE.WebGLRenderTarget: .anisotropy is now .texture.anisotropy.");this.texture.anisotropy=a}},offset:{get:function(){console.warn("THREE.WebGLRenderTarget: .offset is now .texture.offset.");return this.texture.offset},set:function(a){console.warn("THREE.WebGLRenderTarget: .offset is now .texture.offset.");this.texture.offset=a}},repeat:{get:function(){console.warn("THREE.WebGLRenderTarget: .repeat is now .texture.repeat.");return this.texture.repeat}, -set:function(a){console.warn("THREE.WebGLRenderTarget: .repeat is now .texture.repeat.");this.texture.repeat=a}},format:{get:function(){console.warn("THREE.WebGLRenderTarget: .format is now .texture.format.");return this.texture.format},set:function(a){console.warn("THREE.WebGLRenderTarget: .format is now .texture.format.");this.texture.format=a}},type:{get:function(){console.warn("THREE.WebGLRenderTarget: .type is now .texture.type.");return this.texture.type},set:function(a){console.warn("THREE.WebGLRenderTarget: .type is now .texture.type."); -this.texture.type=a}},generateMipmaps:{get:function(){console.warn("THREE.WebGLRenderTarget: .generateMipmaps is now .texture.generateMipmaps.");return this.texture.generateMipmaps},set:function(a){console.warn("THREE.WebGLRenderTarget: .generateMipmaps is now .texture.generateMipmaps.");this.texture.generateMipmaps=a}}});Object.assign(dc.prototype,{load:function(a){console.warn("THREE.Audio: .load has been deprecated. 
Please use THREE.AudioLoader.");var b=this;(new Od).load(a,function(a){b.setBuffer(a)}); -return this}});Object.assign(Rd.prototype,{getData:function(a){console.warn("THREE.AudioAnalyser: .getData() is now .getFrequencyData().");return this.getFrequencyData()}});l.WebGLRenderTargetCube=Eb;l.WebGLRenderTarget=Db;l.WebGLRenderer=Dd;l.ShaderLib=Gb;l.UniformsLib=W;l.UniformsUtils=La;l.ShaderChunk=X;l.FogExp2=Ib;l.Fog=Jb;l.Scene=jb;l.LensFlare=Ed;l.Sprite=qc;l.LOD=rc;l.SkinnedMesh=dd;l.Skeleton=bd;l.Bone=cd;l.Mesh=ya;l.LineSegments=la;l.Line=Ta;l.Points=Kb;l.Group=sc;l.VideoTexture=ed;l.DataTexture= -lb;l.CompressedTexture=Lb;l.CubeTexture=Xa;l.CanvasTexture=fd;l.DepthTexture=tc;l.TextureIdCount=function(){return ee++};l.Texture=da;l.MaterialIdCount=function(){return oe++};l.CompressedTextureLoader=we;l.BinaryTextureLoader=Gd;l.DataTextureLoader=Gd;l.CubeTextureLoader=Hd;l.TextureLoader=gd;l.ObjectLoader=xe;l.MaterialLoader=ud;l.BufferGeometryLoader=Id;l.DefaultLoadingManager=Ga;l.LoadingManager=Fd;l.JSONLoader=Jd;l.ImageLoader=Lc;l.FontLoader=ye;l.XHRLoader=Ja;l.Loader=wb;l.Cache=ce;l.AudioLoader= -Od;l.SpotLightShadow=id;l.SpotLight=jd;l.PointLight=kd;l.HemisphereLight=hd;l.DirectionalLightShadow=ld;l.DirectionalLight=md;l.AmbientLight=nd;l.LightShadow=tb;l.Light=pa;l.StereoCamera=ze;l.PerspectiveCamera=Ea;l.OrthographicCamera=Hb;l.CubeCamera=vd;l.Camera=Z;l.AudioListener=Pd;l.PositionalAudio=Qd;l.getAudioContext=Md;l.AudioAnalyser=Rd;l.Audio=dc;l.VectorKeyframeTrack=bc;l.StringKeyframeTrack=rd;l.QuaternionKeyframeTrack=Nc;l.NumberKeyframeTrack=cc;l.ColorKeyframeTrack=td;l.BooleanKeyframeTrack= -sd;l.PropertyMixer=wd;l.PropertyBinding=fa;l.KeyframeTrack=vb;l.AnimationUtils=ma;l.AnimationObjectGroup=Sd;l.AnimationMixer=Ud;l.AnimationClip=Ha;l.Uniform=Ae;l.InstancedBufferGeometry=Bb;l.BufferGeometry=G;l.GeometryIdCount=function(){return ad++};l.Geometry=Q;l.InterleavedBufferAttribute=Vd;l.InstancedInterleavedBuffer=fc;l.InterleavedBuffer=ec;l.InstancedBufferAttribute=gc;l.DynamicBufferAttribute=function(a,b){console.warn("THREE.DynamicBufferAttribute has been removed. 
Use new THREE.BufferAttribute().setDynamic( true ) instead."); -return(new C(a,b)).setDynamic(!0)};l.Float64Attribute=function(a,b){return new C(new Float64Array(a),b)};l.Float32Attribute=ha;l.Uint32Attribute=$c;l.Int32Attribute=function(a,b){return new C(new Int32Array(a),b)};l.Uint16Attribute=Zc;l.Int16Attribute=function(a,b){return new C(new Int16Array(a),b)};l.Uint8ClampedAttribute=function(a,b){return new C(new Uint8ClampedArray(a),b)};l.Uint8Attribute=function(a,b){return new C(new Uint8Array(a),b)};l.Int8Attribute=function(a,b){return new C(new Int8Array(a), -b)};l.BufferAttribute=C;l.Face3=ea;l.Object3DIdCount=function(){return qe++};l.Object3D=z;l.Raycaster=Wd;l.Layers=Yc;l.EventDispatcher=sa;l.Clock=Yd;l.QuaternionLinearInterpolant=qd;l.LinearInterpolant=Mc;l.DiscreteInterpolant=pd;l.CubicInterpolant=od;l.Interpolant=qa;l.Triangle=wa;l.Spline=function(a){function b(a,b,c,d,e,f,g){a=.5*(c-a);d=.5*(d-b);return(2*(b-c)+a+d)*g+(-3*(b-c)-2*a-d)*f+a*e+b}this.points=a;var c=[],d={x:0,y:0,z:0},e,f,g,h,k,l,w,n,p;this.initFromArray=function(a){this.points=[]; -for(var b=0;bthis.points.length-2?this.points.length-1:f+1;c[3]=f>this.points.length-3?this.points.length-1:f+2;l=this.points[c[0]];w=this.points[c[1]];n=this.points[c[2]];p=this.points[c[3]];h=g*g;k=g*h;d.x=b(l.x,w.x,n.x,p.x,g,h,k);d.y=b(l.y,w.y,n.y,p.y,g,h,k);d.z=b(l.z,w.z,n.z,p.z,g,h,k);return d};this.getControlPointsArray=function(){var a, -b,c=this.points.length,d=[];for(a=0;a - """ diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/tools/finecapeval_inference.py b/spaces/NAACL2022/CLIP-Caption-Reward/tools/finecapeval_inference.py deleted file mode 100644 index 260b083e00df7c9b2349be23fd2a09591dec3f2b..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/tools/finecapeval_inference.py +++ /dev/null @@ -1,186 +0,0 @@ -import sys -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim - -import numpy as np - -import time -import os -from collections import defaultdict -import json - -import captioning.utils.opts as opts -import captioning.models as models -from captioning.data.pth_loader import CaptionDataset -import captioning.utils.eval_utils as eval_utils -# import captioning.utils.vizwiz_eval_utils as vizwiz_eval_utils -import captioning.utils.misc as utils -from captioning.utils.rewards import init_scorer, get_self_critical_reward -from captioning.modules.loss_wrapper import LossWrapper - -import pytorch_lightning as pl - - -class ModelCheckpoint(pl.callbacks.ModelCheckpoint): - - def on_keyboard_interrupt(self, trainer, pl_module): - # Save model when keyboard interrupt - filepath = os.path.join(self.dirpath, self.prefix + 'interrupt.ckpt') - self._save_model(filepath) - - -if __name__ == '__main__': - - device = 'cuda' - - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--reward', type=str, default='mle') - args = parser.parse_args() - - if args.reward == 'mle': - cfg = f'configs/phase1/fg_clipRN50_{args.reward}.yml' - else: - cfg = f'configs/phase2/fg_clipRN50_{args.reward}.yml' - - print("Loading cfg from", cfg) - - opt = opts.parse_opt(parse=False, cfg=cfg) - - dataset = CaptionDataset(opt) - - opt.vocab_size = dataset.vocab_size - opt.seq_length = dataset.seq_length - - opt.batch_size = 40 - - opt.vocab = dataset.get_vocab() - - model = models.setup(opt) - del opt.vocab - - ckpt_path = opt.checkpoint_path + '-last.ckpt' - - print("Loading checkpoint from", ckpt_path) - raw_state_dict = torch.load( - ckpt_path, - 
map_location=device)
-
-    strict = True
-
-    state_dict = raw_state_dict['state_dict']
-
-    if '_vocab' in state_dict:
-        model.vocab = utils.deserialize(state_dict['_vocab'])
-        del state_dict['_vocab']
-    elif strict:
-        raise KeyError
-    if '_opt' in state_dict:
-        saved_model_opt = utils.deserialize(state_dict['_opt'])
-        del state_dict['_opt']
-        # Make sure the saved opt is compatible with the current opt
-        need_be_same = ["caption_model",
-                        "rnn_type", "rnn_size", "num_layers"]
-        for checkme in need_be_same:
-            if getattr(saved_model_opt, checkme) in ['updown', 'topdown'] and \
-                    getattr(opt, checkme) in ['updown', 'topdown']:
-                continue
-            assert getattr(saved_model_opt, checkme) == getattr(
-                opt, checkme), "Command line argument and saved model disagree on '%s' " % checkme
-    elif strict:
-        raise KeyError
-    res = model.load_state_dict(state_dict, strict)
-    print(res)
-
-    opt.use_grammar = False
-
-    lw_model = LossWrapper(model, opt)
-
-    split = 'test'
-
-    print("Building dataloader...")
-
-    test_dataset = torch.utils.data.Subset(
-        dataset,
-        dataset.split_ix[split]
-    )
-    test_loader = torch.utils.data.DataLoader(
-        test_dataset,
-        batch_size=opt.batch_size,
-        shuffle=False,
-        num_workers=4,
-        drop_last=False,
-        collate_fn=dataset.collate_func
-    )
-
-    eval_kwargs = {'dataset': opt.input_json}
-    eval_kwargs.update(vars(opt))
-
-    verbose = eval_kwargs.get('verbose', True)
-    verbose_beam = eval_kwargs.get('verbose_beam', 0)
-    verbose_loss = eval_kwargs.get('verbose_loss', 1)
-    # num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
-    # lang_eval = eval_kwargs.get('language_eval', 0)
-    dataset = eval_kwargs.get('dataset', 'coco')
-    beam_size = eval_kwargs.get('beam_size', 1)
-    sample_n = eval_kwargs.get('sample_n', 1)
-    remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
-
-    crit = lw_model.crit
-
-    model = model.to(device)
-
-    from tqdm import tqdm
-
-    test_id2sent = {}
-
-    model.eval()
-
-    print("running inference...")
-
-    for data in tqdm(test_loader):
-        with torch.no_grad():
-            # forward the model to get loss
-            tmp = [data['fc_feats'], data['att_feats'],
-                   data['labels'], data['masks'], data['att_masks']]
-            tmp = [d.to(device) if isinstance(d, torch.Tensor) else d for d in tmp]
-
-            fc_feats, att_feats, labels, masks, att_masks = tmp
-
-            loss = crit(model(fc_feats, att_feats,
-                              labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
-
-            # forward the model to also get generated samples for each image
-            # Only leave one feature for each image, in case duplicate sample
-            tmp_eval_kwargs = eval_kwargs.copy()
-            tmp_eval_kwargs.update({'sample_n': 1})
-            seq, seq_logprobs = model(
-                fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
-            seq = seq.data
-            # token-averaged entropy and negative log-likelihood of the
-            # sampled captions; the +1 guards against empty sequences
-            entropy = - (F.softmax(seq_logprobs, dim=2) *
-                         seq_logprobs).sum(2).sum(1) / ((seq > 0).to(seq_logprobs).sum(1)+1)
-            perplexity = - \
-                seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(
-                    2).sum(1) / ((seq > 0).to(seq_logprobs).sum(1)+1)
-
-            # Print beam search
-            if beam_size > 1 and verbose_beam:
-                for i in range(fc_feats.shape[0]):
-                    print('\n'.join([utils.decode_sequence(model.vocab, _[
-                          'seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
-                    print('--' * 10)
-            sents = utils.decode_sequence(model.vocab, seq)
-
-            for d, sent in zip(data['infos'], sents):
-                test_id2sent[d['id']] = sent
-
-    res_path = f'FineCapEval_results/clipRN50_{args.reward}.json'
-
-    print("Results saved at {}".format(res_path))
-
-    with open(res_path, 'w') as f:
-        json.dump(test_id2sent, f)
-
-
diff --git
a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/transformer_main_test.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/transformer_main_test.py deleted file mode 100644 index a65cc4bcbf3a1c4281a36730a1ab60c496f3c7aa..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/transformer_main_test.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2018 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test Transformer model.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import re -import sys -import unittest - -from absl import flags -from absl.testing import flagsaver -import tensorflow as tf -from tensorflow.python.eager import context # pylint: disable=ungrouped-imports -from official.nlp.transformer import misc -from official.nlp.transformer import transformer_main -from official.utils.misc import keras_utils - -FLAGS = flags.FLAGS -FIXED_TIMESTAMP = 'my_time_stamp' -WEIGHT_PATTERN = re.compile(r'weights-epoch-.+\.hdf5') - - -def _generate_file(filepath, lines): - with open(filepath, 'w') as f: - for l in lines: - f.write('{}\n'.format(l)) - - -class TransformerTaskTest(tf.test.TestCase): - local_flags = None - - def setUp(self): - temp_dir = self.get_temp_dir() - if TransformerTaskTest.local_flags is None: - misc.define_transformer_flags() - # Loads flags, array cannot be blank. 
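-      # absl flags may only be read after FLAGS() has parsed an argv;
-      # the first element is consumed as the program name, so a single
-      # dummy entry is enough to initialize all flag defaults.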
- flags.FLAGS(['foo']) - TransformerTaskTest.local_flags = flagsaver.save_flag_values() - else: - flagsaver.restore_flag_values(TransformerTaskTest.local_flags) - FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP) - FLAGS.param_set = 'tiny' - FLAGS.use_synthetic_data = True - FLAGS.steps_between_evals = 1 - FLAGS.train_steps = 2 - FLAGS.validation_steps = 1 - FLAGS.batch_size = 8 - FLAGS.max_length = 1 - FLAGS.num_gpus = 1 - FLAGS.distribution_strategy = 'off' - FLAGS.dtype = 'fp32' - self.model_dir = FLAGS.model_dir - self.temp_dir = temp_dir - self.vocab_file = os.path.join(temp_dir, 'vocab') - self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size'] - self.bleu_source = os.path.join(temp_dir, 'bleu_source') - self.bleu_ref = os.path.join(temp_dir, 'bleu_ref') - self.orig_policy = ( - tf.compat.v2.keras.mixed_precision.experimental.global_policy()) - - def tearDown(self): - tf.compat.v2.keras.mixed_precision.experimental.set_policy(self.orig_policy) - - def _assert_exists(self, filepath): - self.assertTrue(os.path.exists(filepath)) - - def test_train_no_dist_strat(self): - if context.num_gpus() >= 2: - self.skipTest('No need to test 2+ GPUs without a distribution strategy.') - t = transformer_main.TransformerTask(FLAGS) - t.train() - - def test_train_static_batch(self): - if context.num_gpus() >= 2: - self.skipTest('No need to test 2+ GPUs without a distribution strategy.') - FLAGS.distribution_strategy = 'one_device' - if tf.test.is_built_with_cuda(): - FLAGS.num_gpus = 1 - else: - FLAGS.num_gpus = 0 - FLAGS.static_batch = True - t = transformer_main.TransformerTask(FLAGS) - t.train() - - @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') - def test_train_1_gpu_with_dist_strat(self): - FLAGS.distribution_strategy = 'one_device' - t = transformer_main.TransformerTask(FLAGS) - t.train() - - @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') - def test_train_fp16(self): - FLAGS.distribution_strategy = 'one_device' - FLAGS.dtype = 'fp16' - t = transformer_main.TransformerTask(FLAGS) - t.train() - - @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') - def test_train_2_gpu(self): - if context.num_gpus() < 2: - self.skipTest( - '{} GPUs are not available for this test. {} GPUs are available' - .format(2, context.num_gpus())) - FLAGS.distribution_strategy = 'mirrored' - FLAGS.num_gpus = 2 - FLAGS.param_set = 'base' - t = transformer_main.TransformerTask(FLAGS) - t.train() - - @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') - def test_train_2_gpu_fp16(self): - if context.num_gpus() < 2: - self.skipTest( - '{} GPUs are not available for this test. {} GPUs are available' - .format(2, context.num_gpus())) - FLAGS.distribution_strategy = 'mirrored' - FLAGS.num_gpus = 2 - FLAGS.param_set = 'base' - FLAGS.dtype = 'fp16' - t = transformer_main.TransformerTask(FLAGS) - t.train() - - def _prepare_files_and_flags(self, *extra_flags): - # Make log dir. - if not os.path.exists(self.temp_dir): - os.makedirs(self.temp_dir) - - # Fake vocab, bleu_source and bleu_ref. - tokens = [ - "''", "''", "'_'", "'a'", "'b'", "'c'", "'d'", "'a_'", "'b_'", - "'c_'", "'d_'" - ] - tokens += ["'{}'".format(i) for i in range(self.vocab_size - len(tokens))] - _generate_file(self.vocab_file, tokens) - _generate_file(self.bleu_source, ['a b', 'c d']) - _generate_file(self.bleu_ref, ['a b', 'd c']) - - # Update flags. 
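-    # The list below mimics sys.argv: the first entry stands in for the
-    # program name and the remaining '--name=value' strings override the
-    # current flag values (e.g. the fp16 tests append '--dtype=fp16').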
- update_flags = [ - 'ignored_program_name', - '--vocab_file={}'.format(self.vocab_file), - '--bleu_source={}'.format(self.bleu_source), - '--bleu_ref={}'.format(self.bleu_ref), - ] - if extra_flags: - update_flags.extend(extra_flags) - FLAGS(update_flags) - - def test_predict(self): - if context.num_gpus() >= 2: - self.skipTest('No need to test 2+ GPUs without a distribution strategy.') - self._prepare_files_and_flags() - t = transformer_main.TransformerTask(FLAGS) - t.predict() - - @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU') - def test_predict_fp16(self): - if context.num_gpus() >= 2: - self.skipTest('No need to test 2+ GPUs without a distribution strategy.') - self._prepare_files_and_flags('--dtype=fp16') - t = transformer_main.TransformerTask(FLAGS) - t.predict() - - def test_eval(self): - if context.num_gpus() >= 2: - self.skipTest('No need to test 2+ GPUs without a distribution strategy.') - if 'test_xla' in sys.argv[0]: - self.skipTest('TODO(xla): Make this test faster under XLA.') - self._prepare_files_and_flags() - t = transformer_main.TransformerTask(FLAGS) - t.eval() - - -if __name__ == '__main__': - tf.test.main() diff --git a/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/train_classifier.py b/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/train_classifier.py deleted file mode 100644 index f498d2c2fb9fd16f5c38bc10e9d80c124e127cb4..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/adversarial_text/train_classifier.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2017 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Trains LSTM text classification model. - -Model trains with adversarial or virtual adversarial training. - -Computational time: - 1.8 hours to train 10000 steps without adversarial or virtual adversarial - training, on 1 layer 1024 hidden units LSTM, 256 embeddings, 400 truncated - BP, 64 minibatch and on single GPU (Pascal Titan X, cuDNNv5). - - 4 hours to train 10000 steps with adversarial or virtual adversarial - training, with above condition. - -To initialize embedding and LSTM cell weights from a pretrained model, set -FLAGS.pretrained_model_dir to the pretrained model's checkpoint directory. 
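-
-Hypothetical invocation (the checkpoint path is only an example; the other
-flags are defined in the training utilities this script imports):
-  python train_classifier.py --pretrained_model_dir=/tmp/lm_pretrain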
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import tensorflow as tf - -import graphs -import train_utils - -flags = tf.app.flags -FLAGS = flags.FLAGS - -flags.DEFINE_string('pretrained_model_dir', None, - 'Directory path to pretrained model to restore from') - - -def main(_): - """Trains LSTM classification model.""" - tf.logging.set_verbosity(tf.logging.INFO) - with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): - model = graphs.get_model() - train_op, loss, global_step = model.classifier_training() - train_utils.run_training( - train_op, - loss, - global_step, - variables_to_restore=model.pretrained_variables, - pretrained_model_dir=FLAGS.pretrained_model_dir) - - -if __name__ == '__main__': - tf.app.run() diff --git a/spaces/NachtYoru/Linaqruf-anything-v3-better-vae/README.md b/spaces/NachtYoru/Linaqruf-anything-v3-better-vae/README.md deleted file mode 100644 index 0c078f4237c501c81de3929126547a54d926eaa8..0000000000000000000000000000000000000000 --- a/spaces/NachtYoru/Linaqruf-anything-v3-better-vae/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Linaqruf Anything V3 Better Vae -emoji: 🐠 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nephele/bert-vits2-multi-voice/monotonic_align/__init__.py b/spaces/Nephele/bert-vits2-multi-voice/monotonic_align/__init__.py deleted file mode 100644 index a323673bb16070d6d0fffddb939b657d0915ff1b..0000000000000000000000000000000000000000 --- a/spaces/Nephele/bert-vits2-multi-voice/monotonic_align/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. 
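-    Finds, for each batch element, the monotonic alignment path through
-    neg_cent that maximizes the total value inside the region given by mask;
-    the dynamic program itself runs in the JIT-compiled maximum_path_jit
-    imported from .core.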
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) \ No newline at end of file diff --git a/spaces/NeuralInternet/Audio-to-Text_Playground/README.md b/spaces/NeuralInternet/Audio-to-Text_Playground/README.md deleted file mode 100644 index 8bad0410a43cc1c980242dd4af429859a034558b..0000000000000000000000000000000000000000 --- a/spaces/NeuralInternet/Audio-to-Text_Playground/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Audio-to-Text Playground -emoji: 🤫 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -tags: -- whisper-event -duplicated_from: sanchit-gandhi/whisper-large-v2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/normalize_text.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/normalize_text.py deleted file mode 100644 index 9d0ffeb27d038a6b82aaf0f6bdf208af565663f6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/unsupervised/scripts/normalize_text.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import regex -import sys - - -def main(): - filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]") - - for line in sys.stdin: - line = line.strip() - line = filter_r.sub(" ", line) - line = " ".join(line.split()) - print(line) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/wav2vec_manifest.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/wav2vec_manifest.py deleted file mode 100644 index 9b8aa180e88d9ee98bdca7089aed5046ec0d9cb9..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/wav2vec_manifest.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Data pre-processing: build vocabularies and binarize training data. 
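-
-Hypothetical usage (the audio root is only an example; the flags are the
-ones defined below):
-  python wav2vec_manifest.py /data/LibriSpeech --dest manifests --ext flac --valid-percent 0.01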
-""" - -import argparse -import glob -import os -import random - -import soundfile - - -def get_parser(): - parser = argparse.ArgumentParser() - parser.add_argument( - "root", metavar="DIR", help="root directory containing flac files to index" - ) - parser.add_argument( - "--valid-percent", - default=0.01, - type=float, - metavar="D", - help="percentage of data to use as validation set (between 0 and 1)", - ) - parser.add_argument( - "--dest", default=".", type=str, metavar="DIR", help="output directory" - ) - parser.add_argument( - "--ext", default="flac", type=str, metavar="EXT", help="extension to look for" - ) - parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed") - parser.add_argument( - "--path-must-contain", - default=None, - type=str, - metavar="FRAG", - help="if set, path must contain this substring for a file to be included in the manifest", - ) - return parser - - -def main(args): - assert args.valid_percent >= 0 and args.valid_percent <= 1.0 - - if not os.path.exists(args.dest): - os.makedirs(args.dest) - - dir_path = os.path.realpath(args.root) - search_path = os.path.join(dir_path, "**/*." + args.ext) - rand = random.Random(args.seed) - - valid_f = ( - open(os.path.join(args.dest, "valid.tsv"), "w") - if args.valid_percent > 0 - else None - ) - - with open(os.path.join(args.dest, "train.tsv"), "w") as train_f: - print(dir_path, file=train_f) - - if valid_f is not None: - print(dir_path, file=valid_f) - - for fname in glob.iglob(search_path, recursive=True): - file_path = os.path.realpath(fname) - - if args.path_must_contain and args.path_must_contain not in file_path: - continue - - frames = soundfile.info(fname).frames - dest = train_f if rand.random() > args.valid_percent else valid_f - print( - "{}\t{}".format(os.path.relpath(file_path, dir_path), frames), file=dest - ) - if valid_f is not None: - valid_f.close() - - -if __name__ == "__main__": - parser = get_parser() - args = parser.parse_args() - main(args) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py deleted file mode 100644 index 07b338dcfd2d7f10317608274631d0edd93ba889..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import os -import glob -import argparse -from utils.dedup import deup -import sys - -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. 
Exiting...')
-    sys.exit(-1)
-
-def get_directions(folder):
-    raw_files = glob.glob(f'{folder}/train*')
-    directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
-    return directions
-
-def diff_list(lhs, rhs):
-    return set(lhs).difference(set(rhs))
-
-def check_diff(
-    from_src_file, from_tgt_file,
-    to_src_file, to_tgt_file,
-):
-    seen_in_from = set()
-    seen_src_in_from = set()
-    seen_tgt_in_from = set()
-    from_count = 0
-    with open(from_src_file, encoding='utf-8') as fsrc, \
-            open(from_tgt_file, encoding='utf-8') as ftgt:
-        for s, t in zip(fsrc, ftgt):
-            seen_in_from.add((s, t))
-            seen_src_in_from.add(s)
-            seen_tgt_in_from.add(t)
-            from_count += 1
-    common = 0
-    common_src = 0
-    common_tgt = 0
-    to_count = 0
-    seen = set()
-
-    with open(to_src_file, encoding='utf-8') as fsrc, \
-            open(to_tgt_file, encoding='utf-8') as ftgt:
-        for s, t in zip(fsrc, ftgt):
-            to_count += 1
-            if (s, t) not in seen:
-                if (s, t) in seen_in_from:
-                    common += 1
-                if s in seen_src_in_from:
-                    common_src += 1
-                    seen_src_in_from.remove(s)
-                if t in seen_tgt_in_from:
-                    common_tgt += 1
-                    seen_tgt_in_from.remove(t)
-                seen.add((s, t))
-    return common, common_src, common_tgt, from_count, to_count
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--folder", type=str, required=True,
-                        help="the data folder")
-    parser.add_argument("--split", type=str, default='test',
-                        help="split (valid, test) to check against training data")
-    parser.add_argument('--directions', type=str, default=None, required=False)
-
-    args = parser.parse_args()
-
-    if args.directions is None:
-        directions = set(get_directions(args.folder))
-        directions = sorted(directions)
-    else:
-        directions = args.directions.split(',')
-        directions = sorted(set(directions))
-
-    results = []
-    print(f'checking where {args.split} split data are in training')
-    print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size')
-
-    for direction in directions:
-        src, tgt = direction.split('-')
-        from_src_file = f'{args.folder}/{args.split}.{src}-{tgt}.{src}'
-        from_tgt_file = f'{args.folder}/{args.split}.{src}-{tgt}.{tgt}'
-        if not os.path.exists(from_src_file):
-            # some test/valid data might be in reverse directions:
-            from_src_file = f'{args.folder}/{args.split}.{tgt}-{src}.{src}'
-            from_tgt_file = f'{args.folder}/{args.split}.{tgt}-{src}.{tgt}'
-        to_src_file = f'{args.folder}/train.{src}-{tgt}.{src}'
-        to_tgt_file = f'{args.folder}/train.{src}-{tgt}.{tgt}'
-        if not os.path.exists(to_src_file) or not os.path.exists(from_src_file):
-            continue
-        r = check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file)
-        results.append(r)
-        print(f'{direction}\t', '\t'.join(map(str, r)))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/evaluation/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/evaluation/__init__.py
deleted file mode 100644
index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/evaluation/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
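For a quick ad-hoc check, `check_diff` above can also be driven directly from Python; a minimal sketch (the file paths are hypothetical, and it assumes the script is importable from the working directory):

```python
from check_self_overlaps import check_diff

# Count test pairs (and individual source/target sides) that also occur
# in the training data for a single translation direction.
common, common_src, common_tgt, from_count, to_count = check_diff(
    "data/test.en-de.en", "data/test.en-de.de",
    "data/train.en-de.en", "data/train.en-de.de",
)
print(f"{common}/{from_count} test pairs also appear in training")
```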
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/language_model/README.adaptive_inputs.md b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/language_model/README.adaptive_inputs.md deleted file mode 100644 index 6650d58f37f320aa46402d59ce6494b2dd1c3faa..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/language_model/README.adaptive_inputs.md +++ /dev/null @@ -1,39 +0,0 @@ -# Adaptive Input Representations for Neural Language Modeling (Baevski and Auli, 2018) - -## Pre-trained models - -Description | Parameters | Dataset | Model and Test set(s) ----|---:|---|--- -Adaptive Inputs
([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853)) | 1026M | [Google Billion Words](https://github.com/ciprian-chelba/1-billion-word-language-modeling-benchmark) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2)
-Adaptive Inputs <br> ([Baevski and Auli, 2018](https://arxiv.org/abs/1809.10853)) | 247M | [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2)
-
-## Training an LM with adaptive inputs
-
-First, see the general [language modeling README](README.md) for instructions on
-preprocessing the WikiText-103 data.
-
-Then use the following training command to train a model with adaptive inputs
-using the `transformer_lm_wiki103` model architecture:
-```bash
-fairseq-train --task language_modeling \
-    data-bin/wikitext-103 \
-    --save-dir checkpoints/transformer_wikitext-103 \
-    --arch transformer_lm_wiki103 \
-    --max-update 286000 --lr 1.0 --t-mult 2 --lr-period-updates 270000 --lr-scheduler cosine --lr-shrink 0.75 \
-    --warmup-updates 16000 --warmup-init-lr 1e-07 --stop-min-lr 1e-09 --optimizer nag --min-lr 0.0001 --clip-norm 0.1 \
-    --criterion adaptive_loss --max-tokens 3072 --update-freq 3 --tokens-per-sample 3072 --seed 1 \
-    --sample-break-mode none --skip-invalid-size-inputs-valid-test --ddp-backend=legacy_ddp
-```
-
-## Citation
-
-```bibtex
-@inproceedings{
-  baevski2018adaptive,
-  title={Adaptive Input Representations for Neural Language Modeling},
-  author={Alexei Baevski and Michael Auli},
-  booktitle={International Conference on Learning Representations},
-  year={2019},
-  url={https://openreview.net/forum?id=ByxZX20qFQ},
-}
-```
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/tokenizers/seg_ja.sh b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/tokenizers/seg_ja.sh
deleted file mode 100644
index be6f5ca5fe4ac8e8c786a439caaed1d1314f1aef..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/tokenizers/seg_ja.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-SCRIPT=`realpath $0`
-KYTEA=`dirname $SCRIPT`/thirdparty/kytea
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib
-export PATH=$PATH:"$KYTEA/bin"
-
-cat - | tr -d "[:blank:]" | kytea -notags
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation_moe/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation_moe/README.md
deleted file mode 100644
index 2e5c8af617f410f64ca38d29447bd05b6af8c5a8..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation_moe/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)
-
-This page includes instructions for reproducing results from the paper [Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)](https://arxiv.org/abs/1902.07816).
-
-## Download data
-
-First, follow the [instructions to download and preprocess the WMT'17 En-De dataset](../translation#prepare-wmt14en2desh).
-Make sure to learn a joint vocabulary by passing the `--joined-dictionary` option to `fairseq-preprocess`.
-
-## Train a model
-
-Then we can train a mixture of experts model using the `translation_moe` task.
-Use the `--method` flag to choose the MoE variant; we support hard mixtures with a learned or uniform prior (`--method hMoElp` and `hMoEup`, respectively) and soft mixtures (`--method sMoElp` and `sMoEup`).
-
-## Citation
-
-```bibtex
-@inproceedings{
-    baevski2018adaptive,
-    title={Adaptive Input Representations for Neural Language Modeling},
-    author={Alexei Baevski and Michael Auli},
-    booktitle={International Conference on Learning Representations},
-    year={2019},
-    url={https://openreview.net/forum?id=ByxZX20qFQ},
-}
-```
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/tokenizers/seg_ja.sh b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/tokenizers/seg_ja.sh
deleted file mode 100644
index be6f5ca5fe4ac8e8c786a439caaed1d1314f1aef..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/m2m_100/tokenizers/seg_ja.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-SCRIPT=`realpath $0`
-KYTEA=`dirname $SCRIPT`/thirdparty/kytea
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib
-export PATH=$PATH:"$KYTEA/bin"
-
-cat - | tr -d "[:blank:]" | kytea -notags
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation_moe/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation_moe/README.md
deleted file mode 100644
index 2e5c8af617f410f64ca38d29447bd05b6af8c5a8..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/translation_moe/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)
-
-This page includes instructions for reproducing results from the paper [Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)](https://arxiv.org/abs/1902.07816).
-
-## Download data
-
-First, follow the [instructions to download and preprocess the WMT'17 En-De dataset](../translation#prepare-wmt14en2desh).
-Make sure to learn a joint vocabulary by passing the `--joined-dictionary` option to `fairseq-preprocess`.
-
-## Train a model
-
-Then we can train a mixture of experts model using the `translation_moe` task.
-Use the `--method` flag to choose the MoE variant; we support hard mixtures with a learned or uniform prior (`--method hMoElp` and `hMoEup`, respectively) and soft mixtures (`--method sMoElp` and `sMoEup`).
-The model is trained with online responsibility assignment and shared parameterization.
-
-The following command will train a `hMoElp` model with `3` experts:
-```bash
-fairseq-train --ddp-backend='legacy_ddp' \
-    data-bin/wmt17_en_de \
-    --max-update 100000 \
-    --task translation_moe --user-dir examples/translation_moe/translation_moe_src \
-    --method hMoElp --mean-pool-gating-network \
-    --num-experts 3 \
-    --arch transformer_wmt_en_de --share-all-embeddings \
-    --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \
-    --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 4000 \
-    --lr 0.0007 \
-    --dropout 0.1 --weight-decay 0.0 --criterion cross_entropy \
-    --max-tokens 3584
-```
-
-## Translate
-
-Once a model is trained, we can generate translations from different experts using the `--gen-expert` option.
-For example, to generate from expert 0:
-```bash
-fairseq-generate data-bin/wmt17_en_de \
-    --path checkpoints/checkpoint_best.pt \
-    --beam 1 --remove-bpe \
-    --task translation_moe --user-dir examples/translation_moe/translation_moe_src \
-    --method hMoElp --mean-pool-gating-network \
-    --num-experts 3 \
-    --gen-expert 0
-```
-
-## Evaluate
-
-First download a tokenized version of the WMT'14 En-De test set with multiple references:
-```bash
-wget dl.fbaipublicfiles.com/fairseq/data/wmt14-en-de.extra_refs.tok
-```
-
-Next apply BPE on the fly and run generation for each expert:
-```bash
-BPE_CODE=examples/translation/wmt17_en_de/code
-for EXPERT in $(seq 0 2); do \
-    cat wmt14-en-de.extra_refs.tok \
-    | grep ^S | cut -f 2 \
-    | fairseq-interactive data-bin/wmt17_en_de \
-        --path checkpoints/checkpoint_best.pt \
-        --beam 1 \
-        --bpe subword_nmt --bpe-codes $BPE_CODE \
-        --buffer-size 500 --max-tokens 6000 \
-        --task translation_moe --user-dir examples/translation_moe/translation_moe_src \
-        --method hMoElp --mean-pool-gating-network \
-        --num-experts 3 \
-        --gen-expert $EXPERT ; \
-done > wmt14-en-de.extra_refs.tok.gen.3experts
-```
-
-Finally use `score.py` to compute pairwise BLEU and average oracle BLEU:
-```bash
-python examples/translation_moe/score.py --sys wmt14-en-de.extra_refs.tok.gen.3experts --ref wmt14-en-de.extra_refs.tok
-# pairwise BLEU: 48.26
-# #refs covered: 2.11
-# multi-reference BLEU (leave-one-out): 59.46
-```
-This matches row 3 from Table 7 in the paper.
-
-## Citation
-
-```bibtex
-@article{shen2019mixture,
-    title = {Mixture Models for Diverse Machine Translation: Tricks of the Trade},
-    author = {Tianxiao Shen and Myle Ott and Michael Auli and Marc'Aurelio Ranzato},
-    journal = {International Conference on Machine Learning},
-    year = 2019,
-}
-```
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/remove_silence.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/remove_silence.py
deleted file mode 100644
index fac88b989703262a84b242b2761df621bf02c739..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/scripts/remove_silence.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
- -""" -get intervals from .vads file, specify output data, and this script removes silences and saves the audio data in out path folder -paths=shards/train.tsv -vads=shards/train.vads -python remove_silence.py --paths $paths --vads $vads -""" - -import os -import argparse -import torch -import torchaudio -import tqdm - - -parser = argparse.ArgumentParser() -parser.add_argument("--tsv", default="", type=str) -parser.add_argument("--vads", default="", type=str) -parser.add_argument("--out", type=str) -params = parser.parse_args() - -# load paths -paths = [] -with open(params.tsv) as f: - root = next(f).rstrip() - for line in f: - paths.append(os.path.join(root, line.rstrip().split("\t")[0])) - -# load vads -list_intervals = [] -with open(params.vads) as f: - for line in f: - interval = [ - [int(w.split(":")[0]), int(w.split(":")[1])] for w in line.rstrip().split() - ] - list_intervals.append(interval) - - -# load audio and keep only intervals (i.e. remove silences) -for i in tqdm.trange(len(paths)): - data, _ = torchaudio.load(paths[i]) - if len(list_intervals[i]) > 0: - data_filtered = torch.cat( - [data[0][int(it[0]) : int(it[1])] for it in list_intervals[i]] - ).unsqueeze(0) - else: - data_filtered = data - - # YOU MAY NEED TO MODIFY THIS TO GET THE RIGHT SUBPATH - # outpath = params.out + '/'.join(paths[i].split('/')[-1]) - outpath = params.out + "/" + "/".join(paths[i].split("/")[-2:]) - - if not os.path.isdir("/".join(outpath.split("/")[:-1])): - os.makedirs("/".join(outpath.split("/")[:-1])) - if not os.path.exists(outpath): - torchaudio.save(outpath, data_filtered, sample_rate=16000) - else: - print(outpath, "exists!") diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py deleted file mode 100644 index 2ea37c16b4a477c48e4dd4500ec03f2d0c86d611..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -from fairseq import metrics, utils -from fairseq.criterions import register_criterion - -from .label_smoothed_cross_entropy import ( - LabelSmoothedCrossEntropyCriterion, - LabelSmoothedCrossEntropyCriterionConfig, -) - -from dataclasses import dataclass, field - - -@dataclass -class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig( - LabelSmoothedCrossEntropyCriterionConfig -): - alignment_lambda: float = field( - default=0.05, metadata={"help": "weight for the alignment loss"} - ) - - -@register_criterion( - "label_smoothed_cross_entropy_with_alignment", - dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig, -) -class LabelSmoothedCrossEntropyCriterionWithAlignment( - LabelSmoothedCrossEntropyCriterion -): - def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda): - super().__init__(task, sentence_avg, label_smoothing) - self.alignment_lambda = alignment_lambda - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. 
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
deleted file mode 100644
index 2ea37c16b4a477c48e4dd4500ec03f2d0c86d611..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-from fairseq import metrics, utils
-from fairseq.criterions import register_criterion
-
-from .label_smoothed_cross_entropy import (
-    LabelSmoothedCrossEntropyCriterion,
-    LabelSmoothedCrossEntropyCriterionConfig,
-)
-
-from dataclasses import dataclass, field
-
-
-@dataclass
-class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig(
-    LabelSmoothedCrossEntropyCriterionConfig
-):
-    alignment_lambda: float = field(
-        default=0.05, metadata={"help": "weight for the alignment loss"}
-    )
-
-
-@register_criterion(
-    "label_smoothed_cross_entropy_with_alignment",
-    dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig,
-)
-class LabelSmoothedCrossEntropyCriterionWithAlignment(
-    LabelSmoothedCrossEntropyCriterion
-):
-    def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda):
-        super().__init__(task, sentence_avg, label_smoothing)
-        self.alignment_lambda = alignment_lambda
-
-    def forward(self, model, sample, reduce=True):
-        """Compute the loss for the given sample.
-
-        Returns a tuple with three elements:
-        1) the loss
-        2) the sample size, which is used as the denominator for the gradient
-        3) logging outputs to display while training
-        """
-        net_output = model(**sample["net_input"])
-        loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
-        sample_size = (
-            sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
-        )
-        logging_output = {
-            "loss": utils.item(loss.data) if reduce else loss.data,
-            "nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data,
-            "ntokens": sample["ntokens"],
-            "nsentences": sample["target"].size(0),
-            "sample_size": sample_size,
-        }
-
-        alignment_loss = None
-
-        # Compute alignment loss only for the training set and non-dummy batches.
-        if "alignments" in sample and sample["alignments"] is not None:
-            alignment_loss = self.compute_alignment_loss(sample, net_output)
-
-        if alignment_loss is not None:
-            logging_output["alignment_loss"] = utils.item(alignment_loss.data)
-            loss += self.alignment_lambda * alignment_loss
-
-        return loss, sample_size, logging_output
-
-    def compute_alignment_loss(self, sample, net_output):
-        attn_prob = net_output[1]["attn"][0]
-        bsz, tgt_sz, src_sz = attn_prob.shape
-        attn = attn_prob.view(bsz * tgt_sz, src_sz)
-
-        align = sample["alignments"]
-        align_weights = sample["align_weights"].float()
-
-        if len(align) > 0:
-            # Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to
-            # the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing.
-            loss = -(
-                (attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
-                * align_weights[:, None]
-            ).sum()
-        else:
-            return None
-
-        return loss
-
-    @staticmethod
-    def reduce_metrics(logging_outputs) -> None:
-        """Aggregate logging outputs from data parallel training."""
-        loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
-        nll_loss_sum = utils.item(
-            sum(log.get("nll_loss", 0) for log in logging_outputs)
-        )
-        alignment_loss_sum = utils.item(
-            sum(log.get("alignment_loss", 0) for log in logging_outputs)
-        )
-        ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
-        sample_size = utils.item(
-            sum(log.get("sample_size", 0) for log in logging_outputs)
-        )
-
-        metrics.log_scalar(
-            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
-        )
-        metrics.log_scalar(
-            "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
-        )
-        metrics.log_scalar(
-            "alignment_loss",
-            alignment_loss_sum / sample_size / math.log(2),
-            sample_size,
-            round=3,
-        )
-        metrics.log_derived(
-            "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
-        )
-
-    @staticmethod
-    def logging_outputs_can_be_summed() -> bool:
-        """
-        Whether the logging outputs returned by `forward` can be summed
-        across workers prior to calling `reduce_metrics`. Setting this
-        to True improves distributed training speed.
-        """
-        return True
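-
-# A toy illustration (not from the original file) of the indexing inside
-# compute_alignment_loss() above: attn is flattened to [bsz * tgt_len, src_len]
-# and align holds (src index, tgt index) pairs, so attn[align[:, 1], align[:, 0]]
-# selects the attention mass each target word puts on its aligned source word.
-# Shapes and values here are made up:
-import torch
-
-attn = torch.full((2 * 3, 4), 0.25)       # bsz=2, tgt_len=3, src_len=4, uniform attention
-align = torch.tensor([[0, 0], [2, 1]])    # two (src, tgt) alignment pairs
-align_weights = torch.tensor([1.0, 0.5])  # 1 / frequency of each target index
-loss = -(
-    (attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
-    * align_weights[:, None]
-).sum()
-# every selected probability is 0.25, so loss = (1.0 + 0.5) * log(4)
-assert torch.isclose(loss, 1.5 * torch.log(torch.tensor(4.0)))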
- """ - return True diff --git a/spaces/OlaWod/FreeVC/speaker_encoder/compute_embed.py b/spaces/OlaWod/FreeVC/speaker_encoder/compute_embed.py deleted file mode 100644 index 2fee33db0168f40efc42145c06fa62016e3e008e..0000000000000000000000000000000000000000 --- a/spaces/OlaWod/FreeVC/speaker_encoder/compute_embed.py +++ /dev/null @@ -1,40 +0,0 @@ -from speaker_encoder import inference as encoder -from multiprocessing.pool import Pool -from functools import partial -from pathlib import Path -# from utils import logmmse -# from tqdm import tqdm -# import numpy as np -# import librosa - - -def embed_utterance(fpaths, encoder_model_fpath): - if not encoder.is_loaded(): - encoder.load_model(encoder_model_fpath) - - # Compute the speaker embedding of the utterance - wav_fpath, embed_fpath = fpaths - wav = np.load(wav_fpath) - wav = encoder.preprocess_wav(wav) - embed = encoder.embed_utterance(wav) - np.save(embed_fpath, embed, allow_pickle=False) - - -def create_embeddings(outdir_root: Path, wav_dir: Path, encoder_model_fpath: Path, n_processes: int): - - wav_dir = outdir_root.joinpath("audio") - metadata_fpath = synthesizer_root.joinpath("train.txt") - assert wav_dir.exists() and metadata_fpath.exists() - embed_dir = synthesizer_root.joinpath("embeds") - embed_dir.mkdir(exist_ok=True) - - # Gather the input wave filepath and the target output embed filepath - with metadata_fpath.open("r") as metadata_file: - metadata = [line.split("|") for line in metadata_file] - fpaths = [(wav_dir.joinpath(m[0]), embed_dir.joinpath(m[2])) for m in metadata] - - # TODO: improve on the multiprocessing, it's terrible. Disk I/O is the bottleneck here. - # Embed the utterances in separate threads - func = partial(embed_utterance, encoder_model_fpath=encoder_model_fpath) - job = Pool(n_processes).imap(func, fpaths) - list(tqdm(job, "Embedding", len(fpaths), unit="utterances")) \ No newline at end of file diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp deleted file mode 100644 index 0a5b7b907c06720fefc77b0dfd921b8ec3ecf2be..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/cocoeval/cocoeval.cpp +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#include "cocoeval.h" -#include -#include -#include -#include - -using namespace pybind11::literals; - -namespace detectron2 { - -namespace COCOeval { - -// Sort detections from highest score to lowest, such that -// detection_instances[detection_sorted_indices[t]] >= -// detection_instances[detection_sorted_indices[t+1]]. 
Use stable_sort to match -// original COCO API -void SortInstancesByDetectionScore( - const std::vector& detection_instances, - std::vector* detection_sorted_indices) { - detection_sorted_indices->resize(detection_instances.size()); - std::iota( - detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); - std::stable_sort( - detection_sorted_indices->begin(), - detection_sorted_indices->end(), - [&detection_instances](size_t j1, size_t j2) { - return detection_instances[j1].score > detection_instances[j2].score; - }); -} - -// Partition the ground truth objects based on whether or not to ignore them -// based on area -void SortInstancesByIgnore( - const std::array& area_range, - const std::vector& ground_truth_instances, - std::vector* ground_truth_sorted_indices, - std::vector* ignores) { - ignores->clear(); - ignores->reserve(ground_truth_instances.size()); - for (auto o : ground_truth_instances) { - ignores->push_back( - o.ignore || o.area < area_range[0] || o.area > area_range[1]); - } - - ground_truth_sorted_indices->resize(ground_truth_instances.size()); - std::iota( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - 0); - std::stable_sort( - ground_truth_sorted_indices->begin(), - ground_truth_sorted_indices->end(), - [&ignores](size_t j1, size_t j2) { - return (int)(*ignores)[j1] < (int)(*ignores)[j2]; - }); -} - -// For each IOU threshold, greedily match each detected instance to a ground -// truth instance (if possible) and store the results -void MatchDetectionsToGroundTruth( - const std::vector& detection_instances, - const std::vector& detection_sorted_indices, - const std::vector& ground_truth_instances, - const std::vector& ground_truth_sorted_indices, - const std::vector& ignores, - const std::vector>& ious, - const std::vector& iou_thresholds, - const std::array& area_range, - ImageEvaluation* results) { - // Initialize memory to store return data matches and ignore - const int num_iou_thresholds = iou_thresholds.size(); - const int num_ground_truth = ground_truth_sorted_indices.size(); - const int num_detections = detection_sorted_indices.size(); - std::vector ground_truth_matches( - num_iou_thresholds * num_ground_truth, 0); - std::vector& detection_matches = results->detection_matches; - std::vector& detection_ignores = results->detection_ignores; - std::vector& ground_truth_ignores = results->ground_truth_ignores; - detection_matches.resize(num_iou_thresholds * num_detections, 0); - detection_ignores.resize(num_iou_thresholds * num_detections, false); - ground_truth_ignores.resize(num_ground_truth); - for (auto g = 0; g < num_ground_truth; ++g) { - ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; - } - - for (auto t = 0; t < num_iou_thresholds; ++t) { - for (auto d = 0; d < num_detections; ++d) { - // information about best match so far (match=-1 -> unmatched) - double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); - int match = -1; - for (auto g = 0; g < num_ground_truth; ++g) { - // if this ground truth instance is already matched and not a - // crowd, it cannot be matched to another detection - if (ground_truth_matches[t * num_ground_truth + g] > 0 && - !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { - continue; - } - - // if detected instance matched to a regular ground truth - // instance, we can break on the first ground truth instance - // tagged as ignore (because they are sorted by the ignore tag) - if (match >= 0 && !ground_truth_ignores[match] && - ground_truth_ignores[g]) 
{ - break; - } - - // if IOU overlap is the best so far, store the match appropriately - if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { - best_iou = ious[d][ground_truth_sorted_indices[g]]; - match = g; - } - } - // if match was made, store id of match for both detection and - // ground truth - if (match >= 0) { - detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; - detection_matches[t * num_detections + d] = - ground_truth_instances[ground_truth_sorted_indices[match]].id; - ground_truth_matches[t * num_ground_truth + match] = - detection_instances[detection_sorted_indices[d]].id; - } - - // set unmatched detections outside of area range to ignore - const InstanceAnnotation& detection = - detection_instances[detection_sorted_indices[d]]; - detection_ignores[t * num_detections + d] = - detection_ignores[t * num_detections + d] || - (detection_matches[t * num_detections + d] == 0 && - (detection.area < area_range[0] || detection.area > area_range[1])); - } - } - - // store detection score results - results->detection_scores.resize(detection_sorted_indices.size()); - for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { - results->detection_scores[d] = - detection_instances[detection_sorted_indices[d]].score; - } -} - -std::vector EvaluateImages( - const std::vector>& area_ranges, - int max_detections, - const std::vector& iou_thresholds, - const ImageCategoryInstances>& image_category_ious, - const ImageCategoryInstances& - image_category_ground_truth_instances, - const ImageCategoryInstances& - image_category_detection_instances) { - const int num_area_ranges = area_ranges.size(); - const int num_images = image_category_ground_truth_instances.size(); - const int num_categories = - image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; - std::vector detection_sorted_indices; - std::vector ground_truth_sorted_indices; - std::vector ignores; - std::vector results_all( - num_images * num_area_ranges * num_categories); - - // Store results for each image, category, and area range combination. 
Results - // for each IOU threshold are packed into the same ImageEvaluation object - for (auto i = 0; i < num_images; ++i) { - for (auto c = 0; c < num_categories; ++c) { - const std::vector& ground_truth_instances = - image_category_ground_truth_instances[i][c]; - const std::vector& detection_instances = - image_category_detection_instances[i][c]; - - SortInstancesByDetectionScore( - detection_instances, &detection_sorted_indices); - if ((int)detection_sorted_indices.size() > max_detections) { - detection_sorted_indices.resize(max_detections); - } - - for (size_t a = 0; a < area_ranges.size(); ++a) { - SortInstancesByIgnore( - area_ranges[a], - ground_truth_instances, - &ground_truth_sorted_indices, - &ignores); - - MatchDetectionsToGroundTruth( - detection_instances, - detection_sorted_indices, - ground_truth_instances, - ground_truth_sorted_indices, - ignores, - image_category_ious[i][c], - iou_thresholds, - area_ranges[a], - &results_all - [c * num_area_ranges * num_images + a * num_images + i]); - } - } - } - - return results_all; -} - -// Convert a python list to a vector -template -std::vector list_to_vec(const py::list& l) { - std::vector v(py::len(l)); - for (int i = 0; i < (int)py::len(l); ++i) { - v[i] = l[i].cast(); - } - return v; -} - -// Helper function to Accumulate() -// Considers the evaluation results applicable to a particular category, area -// range, and max_detections parameter setting, which begin at -// evaluations[evaluation_index]. Extracts a sorted list of length n of all -// applicable detection instances concatenated across all images in the dataset, -// which are represented by the outputs evaluation_indices, detection_scores, -// image_detection_indices, and detection_sorted_indices--all of which are -// length n. evaluation_indices[i] stores the applicable index into -// evaluations[] for instance i, which has detection score detection_score[i], -// and is the image_detection_indices[i]'th of the list of detections -// for the image containing i. 
detection_sorted_indices[] defines a sorted
-// permutation of the 3 other outputs
-int BuildSortedDetectionList(
-    const std::vector<ImageEvaluation>& evaluations,
-    const int64_t evaluation_index,
-    const int64_t num_images,
-    const int max_detections,
-    std::vector<uint64_t>* evaluation_indices,
-    std::vector<double>* detection_scores,
-    std::vector<uint64_t>* detection_sorted_indices,
-    std::vector<uint64_t>* image_detection_indices) {
-  assert(evaluations.size() >= evaluation_index + num_images);
-
-  // Extract a list of object instances of the applicable category, area
-  // range, and max detections requirements such that they can be sorted
-  image_detection_indices->clear();
-  evaluation_indices->clear();
-  detection_scores->clear();
-  image_detection_indices->reserve(num_images * max_detections);
-  evaluation_indices->reserve(num_images * max_detections);
-  detection_scores->reserve(num_images * max_detections);
-  int num_valid_ground_truth = 0;
-  for (auto i = 0; i < num_images; ++i) {
-    const ImageEvaluation& evaluation = evaluations[evaluation_index + i];
-
-    for (int d = 0;
-         d < (int)evaluation.detection_scores.size() && d < max_detections;
-         ++d) { // detected instances
-      evaluation_indices->push_back(evaluation_index + i);
-      image_detection_indices->push_back(d);
-      detection_scores->push_back(evaluation.detection_scores[d]);
-    }
-    for (auto ground_truth_ignore : evaluation.ground_truth_ignores) {
-      if (!ground_truth_ignore) {
-        ++num_valid_ground_truth;
-      }
-    }
-  }
-
-  // Sort detections by decreasing score, using stable sort to match
-  // python implementation
-  detection_sorted_indices->resize(detection_scores->size());
-  std::iota(
-      detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
-  std::stable_sort(
-      detection_sorted_indices->begin(),
-      detection_sorted_indices->end(),
-      [&detection_scores](size_t j1, size_t j2) {
-        return (*detection_scores)[j1] > (*detection_scores)[j2];
-      });
-
-  return num_valid_ground_truth;
-}
-
-// Helper function to Accumulate()
-// Compute a precision recall curve given a sorted list of detected instances
-// encoded in evaluations, evaluation_indices, detection_scores,
-// detection_sorted_indices, image_detection_indices (see
-// BuildSortedDetectionList()). Using vectors precisions and recalls
-// and temporary storage, output the results into precisions_out, recalls_out,
-// and scores_out, which are large buffers containing many precision/recall curves
-// for all possible parameter settings, with precisions_out_index and
-// recalls_out_index defining the applicable indices to store results.
-void ComputePrecisionRecallCurve( - const int64_t precisions_out_index, - const int64_t precisions_out_stride, - const int64_t recalls_out_index, - const std::vector& recall_thresholds, - const int iou_threshold_index, - const int num_iou_thresholds, - const int num_valid_ground_truth, - const std::vector& evaluations, - const std::vector& evaluation_indices, - const std::vector& detection_scores, - const std::vector& detection_sorted_indices, - const std::vector& image_detection_indices, - std::vector* precisions, - std::vector* recalls, - std::vector* precisions_out, - std::vector* scores_out, - std::vector* recalls_out) { - assert(recalls_out->size() > recalls_out_index); - - // Compute precision/recall for each instance in the sorted list of detections - int64_t true_positives_sum = 0, false_positives_sum = 0; - precisions->clear(); - recalls->clear(); - precisions->reserve(detection_sorted_indices.size()); - recalls->reserve(detection_sorted_indices.size()); - assert(!evaluations.empty() || detection_sorted_indices.empty()); - for (auto detection_sorted_index : detection_sorted_indices) { - const ImageEvaluation& evaluation = - evaluations[evaluation_indices[detection_sorted_index]]; - const auto num_detections = - evaluation.detection_matches.size() / num_iou_thresholds; - const auto detection_index = iou_threshold_index * num_detections + - image_detection_indices[detection_sorted_index]; - assert(evaluation.detection_matches.size() > detection_index); - assert(evaluation.detection_ignores.size() > detection_index); - const int64_t detection_match = - evaluation.detection_matches[detection_index]; - const bool detection_ignores = - evaluation.detection_ignores[detection_index]; - const auto true_positive = detection_match > 0 && !detection_ignores; - const auto false_positive = detection_match == 0 && !detection_ignores; - if (true_positive) { - ++true_positives_sum; - } - if (false_positive) { - ++false_positives_sum; - } - - const double recall = - static_cast(true_positives_sum) / num_valid_ground_truth; - recalls->push_back(recall); - const int64_t num_valid_detections = - true_positives_sum + false_positives_sum; - const double precision = num_valid_detections > 0 - ? static_cast(true_positives_sum) / num_valid_detections - : 0.0; - precisions->push_back(precision); - } - - (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; - - for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { - if ((*precisions)[i] > (*precisions)[i - 1]) { - (*precisions)[i - 1] = (*precisions)[i]; - } - } - - // Sample the per instance precision/recall list at each recall threshold - for (size_t r = 0; r < recall_thresholds.size(); ++r) { - // first index in recalls >= recall_thresholds[r] - std::vector::iterator low = std::lower_bound( - recalls->begin(), recalls->end(), recall_thresholds[r]); - size_t precisions_index = low - recalls->begin(); - - const auto results_ind = precisions_out_index + r * precisions_out_stride; - assert(results_ind < precisions_out->size()); - assert(results_ind < scores_out->size()); - if (precisions_index < precisions->size()) { - (*precisions_out)[results_ind] = (*precisions)[precisions_index]; - (*scores_out)[results_ind] = - detection_scores[detection_sorted_indices[precisions_index]]; - } else { - (*precisions_out)[results_ind] = 0; - (*scores_out)[results_ind] = 0; - } - } -} -py::dict Accumulate( - const py::object& params, - const std::vector& evaluations) { - const std::vector recall_thresholds = - list_to_vec(params.attr("recThrs")); - const std::vector max_detections = - list_to_vec(params.attr("maxDets")); - const int num_iou_thresholds = py::len(params.attr("iouThrs")); - const int num_recall_thresholds = py::len(params.attr("recThrs")); - const int num_categories = params.attr("useCats").cast() == 1 - ? py::len(params.attr("catIds")) - : 1; - const int num_area_ranges = py::len(params.attr("areaRng")); - const int num_max_detections = py::len(params.attr("maxDets")); - const int num_images = py::len(params.attr("imgIds")); - - std::vector precisions_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - std::vector recalls_out( - num_iou_thresholds * num_categories * num_area_ranges * - num_max_detections, - -1); - std::vector scores_out( - num_iou_thresholds * num_recall_thresholds * num_categories * - num_area_ranges * num_max_detections, - -1); - - // Consider the list of all detected instances in the entire dataset in one - // large list. evaluation_indices, detection_scores, - // image_detection_indices, and detection_sorted_indices all have the same - // length as this list, such that each entry corresponds to one detected - // instance - std::vector evaluation_indices; // indices into evaluations[] - std::vector detection_scores; // detection scores of each instance - std::vector detection_sorted_indices; // sorted indices of all - // instances in the dataset - std::vector - image_detection_indices; // indices into the list of detected instances in - // the same image as each instance - std::vector precisions, recalls; - - for (auto c = 0; c < num_categories; ++c) { - for (auto a = 0; a < num_area_ranges; ++a) { - for (auto m = 0; m < num_max_detections; ++m) { - // The COCO PythonAPI assumes evaluations[] (the return value of - // COCOeval::EvaluateImages() is one long list storing results for each - // combination of category, area range, and image id, with categories in - // the outermost loop and images in the innermost loop. 
-        const int64_t evaluations_index =
-            c * num_area_ranges * num_images + a * num_images;
-        int num_valid_ground_truth = BuildSortedDetectionList(
-            evaluations,
-            evaluations_index,
-            num_images,
-            max_detections[m],
-            &evaluation_indices,
-            &detection_scores,
-            &detection_sorted_indices,
-            &image_detection_indices);
-
-        if (num_valid_ground_truth == 0) {
-          continue;
-        }
-
-        for (auto t = 0; t < num_iou_thresholds; ++t) {
-          // recalls_out is a flattened vector representing a
-          // num_iou_thresholds X num_categories X num_area_ranges X
-          // num_max_detections matrix
-          const int64_t recalls_out_index =
-              t * num_categories * num_area_ranges * num_max_detections +
-              c * num_area_ranges * num_max_detections +
-              a * num_max_detections + m;
-
-          // precisions_out and scores_out are flattened vectors
-          // representing a num_iou_thresholds X num_recall_thresholds X
-          // num_categories X num_area_ranges X num_max_detections matrix
-          const int64_t precisions_out_stride =
-              num_categories * num_area_ranges * num_max_detections;
-          const int64_t precisions_out_index = t * num_recall_thresholds *
-                  num_categories * num_area_ranges * num_max_detections +
-              c * num_area_ranges * num_max_detections +
-              a * num_max_detections + m;
-
-          ComputePrecisionRecallCurve(
-              precisions_out_index,
-              precisions_out_stride,
-              recalls_out_index,
-              recall_thresholds,
-              t,
-              num_iou_thresholds,
-              num_valid_ground_truth,
-              evaluations,
-              evaluation_indices,
-              detection_scores,
-              detection_sorted_indices,
-              image_detection_indices,
-              &precisions,
-              &recalls,
-              &precisions_out,
-              &scores_out,
-              &recalls_out);
-        }
-      }
-    }
-  }
-
-  time_t rawtime;
-  struct tm local_time;
-  std::array<char, 200> buffer;
-  time(&rawtime);
-#ifdef _WIN32
-  localtime_s(&local_time, &rawtime);
-#else
-  localtime_r(&rawtime, &local_time);
-#endif
-  strftime(buffer.data(), 200, "%Y-%m-%d %H:%M:%S", &local_time);
-  return py::dict(
-      "params"_a = params,
-      "counts"_a = std::vector<int64_t>(
-          {num_iou_thresholds,
-           num_recall_thresholds,
-           num_categories,
-           num_area_ranges,
-           num_max_detections}),
-      "date"_a = buffer,
-      "precision"_a = precisions_out,
-      "recall"_a = recalls_out,
-      "scores"_a = scores_out);
-}
-
-} // namespace COCOeval
-
-} // namespace detectron2
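-
-# A small sketch (not part of the original file) of how the flattened buffers
-# returned by Accumulate() can be viewed from Python. The "counts" entry gives
-# the dimensions [T, R, K, A, M] = [IoU thresholds, recall thresholds,
-# categories, area ranges, max-detection settings]; the values below are the
-# usual COCO defaults and are assumptions:
-import numpy as np
-
-T, R, K, A, M = 10, 101, 80, 4, 3
-flat_precision = np.full(T * R * K * A * M, -1.0)   # stand-in for result["precision"]
-precision = flat_precision.reshape(T, R, K, A, M)   # same layout the C++ code writes
-# e.g. precision[0, :, k, 0, -1] is the PR curve at the first IoU threshold for
-# category k, the first area range, and the largest max-detections setting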
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/sam_preditor.py b/spaces/OpenGVLab/InternGPT/iGPT/models/sam_preditor.py
deleted file mode 100644
index e73f351c3f4d336a8bc6dc919e953453fca66613..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/sam_preditor.py
+++ /dev/null
@@ -1,258 +0,0 @@
-from segment_anything.modeling import Sam
-# from segment_anything import SamPredictor as SamPredictorBase
-import numpy as np
-import torch
-
-from typing import Optional, Tuple
-
-from segment_anything.utils.transforms import ResizeLongestSide
-
-
-class SamPredictor:
-    def __init__(
-        self,
-        sam_model: Sam,
-    ) -> None:
-        """
-        Uses SAM to calculate the image embedding for an image, and then
-        allows repeated, efficient mask prediction given prompts.
-
-        Arguments:
-          sam_model (Sam): The model to use for mask prediction.
-        """
-        super().__init__()
-        self.model = sam_model
-        self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
-        # self.reset_image()
-
-    def set_image(
-        self,
-        image: np.ndarray,
-        image_format: str = "RGB",
-    ) -> None:
-        """
-        Calculates the image embeddings for the provided image, allowing
-        masks to be predicted with the 'predict' method.
-
-        Arguments:
-          image (np.ndarray): The image for calculating masks. Expects an
-            image in HWC uint8 format, with pixel values in [0, 255].
-          image_format (str): The color format of the image, in ['RGB', 'BGR'].
-        """
-        assert image_format in [
-            "RGB",
-            "BGR",
-        ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
-        if image_format != self.model.image_format:
-            image = image[..., ::-1]
-
-        # Transform the image to the form expected by the model
-        input_image = self.transform.apply_image(image)
-        input_image_torch = torch.as_tensor(input_image, device=self.device)
-        input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
-
-        return self.set_torch_image(input_image_torch, image.shape[:2])
-
-    @torch.no_grad()
-    def set_torch_image(
-        self,
-        transformed_image: torch.Tensor,
-        original_image_size: Tuple[int, ...],
-    ) -> None:
-        """
-        Calculates the image embeddings for the provided image, allowing
-        masks to be predicted with the 'predict' method. Expects the input
-        image to be already transformed to the format expected by the model.
-
-        Arguments:
-          transformed_image (torch.Tensor): The input image, with shape
-            1x3xHxW, which has been transformed with ResizeLongestSide.
-          original_image_size (tuple(int, int)): The size of the image
-            before transformation, in (H, W) format.
-        """
-        assert (
-            len(transformed_image.shape) == 4
-            and transformed_image.shape[1] == 3
-            and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
-        ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
-        # self.reset_image()
-
-        original_size = original_image_size
-        input_size = tuple(transformed_image.shape[-2:])
-        input_image = self.model.preprocess(transformed_image)
-        features = self.model.image_encoder(input_image)
-        # self.is_image_set = True
-        res = {'features': features, 'original_size': original_size, 'input_size': input_size}
-        return res
-
-    def predict(
-        self,
-        features,
-        point_coords: Optional[np.ndarray] = None,
-        point_labels: Optional[np.ndarray] = None,
-        box: Optional[np.ndarray] = None,
-        mask_input: Optional[np.ndarray] = None,
-        multimask_output: bool = True,
-        return_logits: bool = False,
-    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
-        """
-        Predict masks for the given input prompts, using the currently set image.
-
-        Arguments:
-          point_coords (np.ndarray or None): A Nx2 array of point prompts to the
-            model. Each point is in (X,Y) in pixels.
-          point_labels (np.ndarray or None): A length N array of labels for the
-            point prompts. 1 indicates a foreground point and 0 indicates a
-            background point.
-          box (np.ndarray or None): A length 4 array given a box prompt to the
-            model, in XYXY format.
-          mask_input (np.ndarray): A low resolution mask input to the model, typically
-            coming from a previous prediction iteration. Has form 1xHxW, where
-            for SAM, H=W=256.
-          multimask_output (bool): If true, the model will return three masks.
-            For ambiguous input prompts (such as a single click), this will often
-            produce better masks than a single prediction. If only a single
-            mask is needed, the model's predicted quality score can be used
-            to select the best mask. For non-ambiguous prompts, such as multiple
-            input prompts, multimask_output=False can give better results.
-          return_logits (bool): If true, returns un-thresholded masks logits
-            instead of a binary mask.
-
-        Returns:
-          (np.ndarray): The output masks in CxHxW format, where C is the
-            number of masks, and (H, W) is the original image size.
- (np.ndarray): An array of length C containing the model's - predictions for the quality of each mask. - (np.ndarray): An array of shape CxHxW, where C is the number - of masks and H=W=256. These low resolution logits can be passed to - a subsequent iteration as mask input. - """ - if features.get('features', None) is None: - raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") - - # Transform input prompts - coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None - if point_coords is not None: - assert ( - point_labels is not None - ), "point_labels must be supplied if point_coords is supplied." - point_coords = self.transform.apply_coords(point_coords, features['original_size']) - coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) - labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) - coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] - if box is not None: - box = self.transform.apply_boxes(box, features['original_size']) - box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) - box_torch = box_torch[None, :] - if mask_input is not None: - mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) - mask_input_torch = mask_input_torch[None, :, :, :] - - masks, iou_predictions, low_res_masks = self.predict_torch( - features, - coords_torch, - labels_torch, - box_torch, - mask_input_torch, - multimask_output, - return_logits=return_logits, - ) - - masks_np = masks[0].detach().cpu().numpy() - iou_predictions_np = iou_predictions[0].detach().cpu().numpy() - low_res_masks_np = low_res_masks[0].detach().cpu().numpy() - return masks_np, iou_predictions_np, low_res_masks_np - - @torch.no_grad() - def predict_torch( - self, - features, - point_coords: Optional[torch.Tensor], - point_labels: Optional[torch.Tensor], - boxes: Optional[torch.Tensor] = None, - mask_input: Optional[torch.Tensor] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Predict masks for the given input prompts, using the currently set image. - Input prompts are batched torch tensors and are expected to already be - transformed to the input frame using ResizeLongestSide. - - Arguments: - point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (torch.Tensor or None): A BxN array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - boxes (np.ndarray or None): A Bx4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form Bx1xHxW, where - for SAM, H=W=256. Masks returned by a previous iteration of the - predict method do not need further transformation. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. 
-
-        Returns:
-          (torch.Tensor): The output masks in BxCxHxW format, where C is the
-            number of masks, and (H, W) is the original image size.
-          (torch.Tensor): An array of shape BxC containing the model's
-            predictions for the quality of each mask.
-          (torch.Tensor): An array of shape BxCxHxW, where C is the number
-            of masks and H=W=256. These low res logits can be passed to
-            a subsequent iteration as mask input.
-        """
-        if features.get('features', None) is None:
-            raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
-
-        if point_coords is not None:
-            points = (point_coords, point_labels)
-        else:
-            points = None
-
-        # Embed prompts
-        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
-            points=points,
-            boxes=boxes,
-            masks=mask_input,
-        )
-
-        # Predict masks
-        low_res_masks, iou_predictions = self.model.mask_decoder(
-            image_embeddings=features['features'],
-            image_pe=self.model.prompt_encoder.get_dense_pe(),
-            sparse_prompt_embeddings=sparse_embeddings,
-            dense_prompt_embeddings=dense_embeddings,
-            multimask_output=multimask_output,
-        )
-
-        # Upscale the masks to the original image resolution
-        masks = self.model.postprocess_masks(low_res_masks, features['input_size'], features['original_size'])
-
-        if not return_logits:
-            masks = masks > self.model.mask_threshold
-
-        return masks, iou_predictions, low_res_masks
-
-    def get_image_embedding(self, image) -> torch.Tensor:
-        return self.set_image(image)
-
-    @property
-    def device(self) -> torch.device:
-        return self.model.device
-
-    # def reset_image(self) -> None:
-    #     """Resets the currently set image."""
-    #     self.is_image_set = False
-    #     self.features = None
-    #     self.orig_h = None
-    #     self.orig_w = None
-    #     self.input_h = None
-    #     self.input_w = None
\ No newline at end of file
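-
-# A minimal usage sketch for this stateless variant (not from the original file).
-# Unlike the upstream SamPredictor, set_image() here *returns* the features dict
-# and predict() takes it back as an argument; `sam` and the image path are assumptions:
-import cv2
-import numpy as np
-
-predictor = SamPredictor(sam)                          # `sam` is a loaded Sam model
-image = cv2.cvtColor(cv2.imread("photo.jpg"), cv2.COLOR_BGR2RGB)
-features = predictor.set_image(image)                  # embeddings + size metadata
-masks, scores, low_res_logits = predictor.predict(
-    features,
-    point_coords=np.array([[320, 240]]),               # one foreground click
-    point_labels=np.array([1]),
-    multimask_output=True,
-)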
- """ - return self._point_size - - @point_size.setter - def point_size(self, value): - self._point_size = float(value) - - def render(self, scene, flags=RenderFlags.NONE, seg_node_map=None): - """Render a scene with the given set of flags. - - Parameters - ---------- - scene : :class:`Scene` - A scene to render. - flags : int - A bitwise or of one or more flags from :class:`.RenderFlags`. - seg_node_map : dict - A map from :class:`.Node` objects to (3,) colors for each. - If specified along with flags set to :attr:`.RenderFlags.SEG`, - the color image will be a segmentation image. - - Returns - ------- - color_im : (h, w, 3) uint8 or (h, w, 4) uint8 - The color buffer in RGB format, or in RGBA format if - :attr:`.RenderFlags.RGBA` is set. - Not returned if flags includes :attr:`.RenderFlags.DEPTH_ONLY`. - depth_im : (h, w) float32 - The depth buffer in linear units. - """ - self._platform.make_current() - # If platform does not support dynamically-resizing framebuffers, - # destroy it and restart it - if (self._platform.viewport_height != self.viewport_height or - self._platform.viewport_width != self.viewport_width): - if not self._platform.supports_framebuffers(): - self.delete() - self._create() - - self._platform.make_current() - self._renderer.viewport_width = self.viewport_width - self._renderer.viewport_height = self.viewport_height - self._renderer.point_size = self.point_size - - if self._platform.supports_framebuffers(): - flags |= RenderFlags.OFFSCREEN - retval = self._renderer.render(scene, flags, seg_node_map) - else: - self._renderer.render(scene, flags, seg_node_map) - depth = self._renderer.read_depth_buf() - if flags & RenderFlags.DEPTH_ONLY: - retval = depth - else: - color = self._renderer.read_color_buf() - retval = color, depth - - # Make the platform not current - self._platform.make_uncurrent() - return retval - - def delete(self): - """Free all OpenGL resources. 
- """ - self._platform.make_current() - self._renderer.delete() - self._platform.delete_context() - del self._renderer - del self._platform - self._renderer = None - self._platform = None - import gc - gc.collect() - - def _create(self): - if 'PYOPENGL_PLATFORM' not in os.environ: - from pyrender.platforms.pyglet_platform import PygletPlatform - self._platform = PygletPlatform(self.viewport_width, - self.viewport_height) - elif os.environ['PYOPENGL_PLATFORM'] == 'egl': - from pyrender.platforms import egl - device_id = int(os.environ.get('EGL_DEVICE_ID', '0')) - egl_device = egl.get_device_by_index(device_id) - self._platform = egl.EGLPlatform(self.viewport_width, - self.viewport_height, - device=egl_device) - elif os.environ['PYOPENGL_PLATFORM'] == 'osmesa': - from pyrender.platforms.osmesa import OSMesaPlatform - self._platform = OSMesaPlatform(self.viewport_width, - self.viewport_height) - else: - raise ValueError('Unsupported PyOpenGL platform: {}'.format( - os.environ['PYOPENGL_PLATFORM'] - )) - self._platform.init_context() - self._platform.make_current() - self._renderer = Renderer(self.viewport_width, self.viewport_height) - - def __del__(self): - try: - self.delete() - except Exception: - pass - - -__all__ = ['OffscreenRenderer'] diff --git a/spaces/Polo45/README/README.md b/spaces/Polo45/README/README.md deleted file mode 100644 index b53b62a94edc8173852e04dbb82f09bae48bd8e5..0000000000000000000000000000000000000000 --- a/spaces/Polo45/README/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: README -emoji: 🦀 -colorFrom: blue -colorTo: purple -sdk: static -pinned: false ---- - -Edit this `README.md` markdown file to author your organization card 🔥 diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/msstftd.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/msstftd.py deleted file mode 100644 index 81a9100961c7a89a39df2643b24268fb90bfeaa4..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/msstftd.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import torchaudio -import torch -from torch import nn -from einops import rearrange - -from ...modules import NormConv2d -from .base import MultiDiscriminator, MultiDiscriminatorOutputType - - -def get_2d_padding(kernel_size: tp.Tuple[int, int], dilation: tp.Tuple[int, int] = (1, 1)): - return (((kernel_size[0] - 1) * dilation[0]) // 2, ((kernel_size[1] - 1) * dilation[1]) // 2) - - -class DiscriminatorSTFT(nn.Module): - """STFT sub-discriminator. - - Args: - filters (int): Number of filters in convolutions. - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - n_fft (int): Size of FFT for each scale. - hop_length (int): Length of hop between STFT windows for each scale. - kernel_size (tuple of int): Inner Conv2d kernel sizes. - stride (tuple of int): Inner Conv2d strides. - dilations (list of int): Inner Conv2d dilation on the time dimension. - win_length (int): Window size for each scale. - normalized (bool): Whether to normalize by magnitude after stft. - norm (str): Normalization method. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. 
diff --git a/spaces/Polo45/README/README.md b/spaces/Polo45/README/README.md
deleted file mode 100644
index b53b62a94edc8173852e04dbb82f09bae48bd8e5..0000000000000000000000000000000000000000
--- a/spaces/Polo45/README/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: README
-emoji: 🦀
-colorFrom: blue
-colorTo: purple
-sdk: static
-pinned: false
----
-
-Edit this `README.md` markdown file to author your organization card 🔥
diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/msstftd.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/msstftd.py
deleted file mode 100644
index 81a9100961c7a89a39df2643b24268fb90bfeaa4..0000000000000000000000000000000000000000
--- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/msstftd.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-import torchaudio
-import torch
-from torch import nn
-from einops import rearrange
-
-from ...modules import NormConv2d
-from .base import MultiDiscriminator, MultiDiscriminatorOutputType
-
-
-def get_2d_padding(kernel_size: tp.Tuple[int, int], dilation: tp.Tuple[int, int] = (1, 1)):
-    return (((kernel_size[0] - 1) * dilation[0]) // 2, ((kernel_size[1] - 1) * dilation[1]) // 2)
-
-
-class DiscriminatorSTFT(nn.Module):
-    """STFT sub-discriminator.
-
-    Args:
-        filters (int): Number of filters in convolutions.
-        in_channels (int): Number of input channels.
-        out_channels (int): Number of output channels.
-        n_fft (int): Size of FFT for each scale.
-        hop_length (int): Length of hop between STFT windows for each scale.
-        kernel_size (tuple of int): Inner Conv2d kernel sizes.
-        stride (tuple of int): Inner Conv2d strides.
-        dilations (list of int): Inner Conv2d dilation on the time dimension.
-        win_length (int): Window size for each scale.
-        normalized (bool): Whether to normalize by magnitude after stft.
-        norm (str): Normalization method.
-        activation (str): Activation function.
-        activation_params (dict): Parameters to provide to the activation function.
-        filters_scale (int): Growth factor for the number of filters.
-        max_filters (int): Maximum number of filters.
-    """
-    def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1,
-                 n_fft: int = 1024, hop_length: int = 256, win_length: int = 1024, max_filters: int = 1024,
-                 filters_scale: int = 1, kernel_size: tp.Tuple[int, int] = (3, 9), dilations: tp.List = [1, 2, 4],
-                 stride: tp.Tuple[int, int] = (1, 2), normalized: bool = True, norm: str = 'weight_norm',
-                 activation: str = 'LeakyReLU', activation_params: dict = {'negative_slope': 0.2}):
-        super().__init__()
-        assert len(kernel_size) == 2
-        assert len(stride) == 2
-        self.filters = filters
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-        self.n_fft = n_fft
-        self.hop_length = hop_length
-        self.win_length = win_length
-        self.normalized = normalized
-        self.activation = getattr(torch.nn, activation)(**activation_params)
-        self.spec_transform = torchaudio.transforms.Spectrogram(
-            n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window_fn=torch.hann_window,
-            normalized=self.normalized, center=False, pad_mode=None, power=None)
-        spec_channels = 2 * self.in_channels
-        self.convs = nn.ModuleList()
-        self.convs.append(
-            NormConv2d(spec_channels, self.filters, kernel_size=kernel_size, padding=get_2d_padding(kernel_size))
-        )
-        in_chs = min(filters_scale * self.filters, max_filters)
-        for i, dilation in enumerate(dilations):
-            out_chs = min((filters_scale ** (i + 1)) * self.filters, max_filters)
-            self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride,
-                                         dilation=(dilation, 1), padding=get_2d_padding(kernel_size, (dilation, 1)),
-                                         norm=norm))
-            in_chs = out_chs
-        out_chs = min((filters_scale ** (len(dilations) + 1)) * self.filters, max_filters)
-        self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_size[0], kernel_size[0]),
-                                     padding=get_2d_padding((kernel_size[0], kernel_size[0])),
-                                     norm=norm))
-        self.conv_post = NormConv2d(out_chs, self.out_channels,
-                                    kernel_size=(kernel_size[0], kernel_size[0]),
-                                    padding=get_2d_padding((kernel_size[0], kernel_size[0])),
-                                    norm=norm)
-
-    def forward(self, x: torch.Tensor):
-        fmap = []
-        z = self.spec_transform(x)  # [B, 2, Freq, Frames, 2]
-        z = torch.cat([z.real, z.imag], dim=1)
-        z = rearrange(z, 'b c w t -> b c t w')
-        for i, layer in enumerate(self.convs):
-            z = layer(z)
-            z = self.activation(z)
-            fmap.append(z)
-        z = self.conv_post(z)
-        return z, fmap
-
-
-class MultiScaleSTFTDiscriminator(MultiDiscriminator):
-    """Multi-Scale STFT (MS-STFT) discriminator.
-
-    Args:
-        filters (int): Number of filters in convolutions.
-        in_channels (int): Number of input channels.
-        out_channels (int): Number of output channels.
-        sep_channels (bool): Separate channels to distinct samples for stereo support.
-        n_ffts (Sequence[int]): Size of FFT for each scale.
-        hop_lengths (Sequence[int]): Length of hop between STFT windows for each scale.
-        win_lengths (Sequence[int]): Window size for each scale.
-        **kwargs: Additional args for STFTDiscriminator.
- """ - def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1, sep_channels: bool = False, - n_ffts: tp.List[int] = [1024, 2048, 512], hop_lengths: tp.List[int] = [256, 512, 128], - win_lengths: tp.List[int] = [1024, 2048, 512], **kwargs): - super().__init__() - assert len(n_ffts) == len(hop_lengths) == len(win_lengths) - self.sep_channels = sep_channels - self.discriminators = nn.ModuleList([ - DiscriminatorSTFT(filters, in_channels=in_channels, out_channels=out_channels, - n_fft=n_ffts[i], win_length=win_lengths[i], hop_length=hop_lengths[i], **kwargs) - for i in range(len(n_ffts)) - ]) - - @property - def num_discriminators(self): - return len(self.discriminators) - - def _separate_channels(self, x: torch.Tensor) -> torch.Tensor: - B, C, T = x.shape - return x.view(-1, 1, T) - - def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType: - logits = [] - fmaps = [] - for disc in self.discriminators: - logit, fmap = disc(x) - logits.append(logit) - fmaps.append(fmap) - return logits, fmaps diff --git a/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h deleted file mode 100644 index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000 --- a/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h +++ /dev/null @@ -1,316 +0,0 @@ -// jpgd.h - C++ class for JPEG decompression. -// Public domain, Rich Geldreich -#ifndef JPEG_DECODER_H -#define JPEG_DECODER_H - -#include -#include -#include - -namespace jpgd -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef unsigned short uint16; - typedef unsigned int uint; - typedef signed int int32; - - // Loads a JPEG image from a memory buffer or a file. - // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA). - // On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB). - // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly. - // Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp. -// BEGIN EPIC MOD -//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps); - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format); -// END EPIC MOD - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps); - - // Success/failure error codes. 
diff --git a/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h
deleted file mode 100644
index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000
--- a/spaces/Qiukai/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h
+++ /dev/null
@@ -1,316 +0,0 @@
-// jpgd.h - C++ class for JPEG decompression.
-// Public domain, Rich Geldreich
-#ifndef JPEG_DECODER_H
-#define JPEG_DECODER_H
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <setjmp.h>
-
-namespace jpgd
-{
-  typedef unsigned char  uint8;
-  typedef   signed short int16;
-  typedef unsigned short uint16;
-  typedef unsigned int   uint;
-  typedef   signed int   int32;
-
-  // Loads a JPEG image from a memory buffer or a file.
-  // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
-  // On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
-  // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
-  // Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
-// BEGIN EPIC MOD
-//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
-  unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
-// END EPIC MOD
-  unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
-
-  // Success/failure error codes.
-  enum jpgd_status
-  {
-    JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
-    JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
-    JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
-    JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
-    JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
-    JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
-    JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
-    JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
-    JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
-  };
-
-  // Input stream interface.
-  // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
-  // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
-  // If the input stream contains data after the JPEG stream's EOI (end of image) marker, it will probably be pulled into the internal buffer.
-  // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
-  class jpeg_decoder_stream
-  {
-  public:
-    jpeg_decoder_stream() { }
-    virtual ~jpeg_decoder_stream() { }
-
-    // The read() method is called when the internal input buffer is empty.
-    // Parameters:
-    // pBuf - input buffer
-    // max_bytes_to_read - maximum bytes that can be written to pBuf
-    // pEOF_flag - set this to true if at end of stream (no more bytes remaining)
-    // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
-    // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
-    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
-  };
-
-  // stdio FILE stream class.
-  class jpeg_decoder_file_stream : public jpeg_decoder_stream
-  {
-    jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
-    jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);
-
-    FILE *m_pFile;
-    bool m_eof_flag, m_error_flag;
-
-  public:
-    jpeg_decoder_file_stream();
-    virtual ~jpeg_decoder_file_stream();
-
-    bool open(const char *Pfilename);
-    void close();
-
-    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
-  };
-
-  // Memory stream class.
-  class jpeg_decoder_mem_stream : public jpeg_decoder_stream
-  {
-    const uint8 *m_pSrc_data;
-    uint m_ofs, m_size;
-
-  public:
-    jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
-    jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }
-
-    virtual ~jpeg_decoder_mem_stream() { }
-
-    bool open(const uint8 *pSrc_data, uint size);
-    void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }
-
-    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
-  };
-
-  // Loads JPEG file from a jpeg_decoder_stream.
- unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps); - - enum - { - JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4, - JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384 - }; - - typedef int16 jpgd_quant_t; - typedef int16 jpgd_block_t; - - class jpeg_decoder - { - public: - // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc. - // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline. - jpeg_decoder(jpeg_decoder_stream *pStream); - - ~jpeg_decoder(); - - // Call this method after constructing the object to begin decompression. - // If JPGD_SUCCESS is returned you may then call decode() on each scanline. - int begin_decoding(); - - // Returns the next scan line. - // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1). - // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4). - // Returns JPGD_SUCCESS if a scan line has been returned. - // Returns JPGD_DONE if all scan lines have been returned. - // Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info. - int decode(const void** pScan_line, uint* pScan_line_len); - - inline jpgd_status get_error_code() const { return m_error_code; } - - inline int get_width() const { return m_image_x_size; } - inline int get_height() const { return m_image_y_size; } - - inline int get_num_components() const { return m_comps_in_frame; } - - inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; } - inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); } - - // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file). 
- inline int get_total_bytes_read() const { return m_total_bytes_read; } - - private: - jpeg_decoder(const jpeg_decoder &); - jpeg_decoder &operator =(const jpeg_decoder &); - - typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int); - - struct huff_tables - { - bool ac_table; - uint look_up[256]; - uint look_up2[256]; - uint8 code_size[256]; - uint tree[512]; - }; - - struct coeff_buf - { - uint8 *pData; - int block_num_x, block_num_y; - int block_len_x, block_len_y; - int block_size; - }; - - struct mem_block - { - mem_block *m_pNext; - size_t m_used_count; - size_t m_size; - char m_data[1]; - }; - - jmp_buf m_jmp_state; - mem_block *m_pMem_blocks; - int m_image_x_size; - int m_image_y_size; - jpeg_decoder_stream *m_pStream; - int m_progressive_flag; - uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES]; - uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size - uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size - jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables - int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported) - int m_comps_in_frame; // # of components in frame - int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor - int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor - int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector - int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID - int m_comp_h_blocks[JPGD_MAX_COMPONENTS]; - int m_comp_v_blocks[JPGD_MAX_COMPONENTS]; - int m_comps_in_scan; // # of components in scan - int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan - int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector - int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector - int m_spectral_start; // spectral selection start - int m_spectral_end; // spectral selection end - int m_successive_low; // successive approximation low - int m_successive_high; // successive approximation high - int m_max_mcu_x_size; // MCU's max. X size in pixels - int m_max_mcu_y_size; // MCU's max. 
Y size in pixels - int m_blocks_per_mcu; - int m_max_blocks_per_row; - int m_mcus_per_row, m_mcus_per_col; - int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU]; - int m_total_lines_left; // total # lines left in image - int m_mcu_lines_left; // total # lines left in this MCU - int m_real_dest_bytes_per_scan_line; - int m_dest_bytes_per_scan_line; // rounded up - int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y) - huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES]; - coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS]; - coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS]; - int m_eob_run; - int m_block_y_mcu[JPGD_MAX_COMPONENTS]; - uint8* m_pIn_buf_ofs; - int m_in_buf_left; - int m_tem_flag; - bool m_eof_flag; - uint8 m_in_buf_pad_start[128]; - uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128]; - uint8 m_in_buf_pad_end[128]; - int m_bits_left; - uint m_bit_buf; - int m_restart_interval; - int m_restarts_left; - int m_next_restart_num; - int m_max_mcus_per_row; - int m_max_blocks_per_mcu; - int m_expanded_blocks_per_mcu; - int m_expanded_blocks_per_row; - int m_expanded_blocks_per_component; - bool m_freq_domain_chroma_upsample; - int m_max_mcus_per_col; - uint m_last_dc_val[JPGD_MAX_COMPONENTS]; - jpgd_block_t* m_pMCU_coefficients; - int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU]; - uint8* m_pSample_buf; - int m_crr[256]; - int m_cbb[256]; - int m_crg[256]; - int m_cbg[256]; - uint8* m_pScan_line_0; - uint8* m_pScan_line_1; - jpgd_status m_error_code; - bool m_ready_flag; - int m_total_bytes_read; - - void free_all_blocks(); - // BEGIN EPIC MOD - UE_NORETURN void stop_decoding(jpgd_status status); - // END EPIC MOD - void *alloc(size_t n, bool zero = false); - void word_clear(void *p, uint16 c, uint n); - void prep_in_buffer(); - void read_dht_marker(); - void read_dqt_marker(); - void read_sof_marker(); - void skip_variable_marker(); - void read_dri_marker(); - void read_sos_marker(); - int next_marker(); - int process_markers(); - void locate_soi_marker(); - void locate_sof_marker(); - int locate_sos_marker(); - void init(jpeg_decoder_stream * pStream); - void create_look_ups(); - void fix_in_buffer(); - void transform_mcu(int mcu_row); - void transform_mcu_expand(int mcu_row); - coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y); - inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y); - void load_next_row(); - void decode_next_row(); - void make_huff_table(int index, huff_tables *pH); - void check_quant_tables(); - void check_huff_tables(); - void calc_mcu_block_order(); - int init_scan(); - void init_frame(); - void process_restart(); - void decode_scan(pDecode_block_func decode_block_func); - void init_progressive(); - void init_sequential(); - void decode_start(); - void decode_init(jpeg_decoder_stream * pStream); - void H2V2Convert(); - void H2V1Convert(); - void H1V2Convert(); - void H1V1Convert(); - void gray_convert(); - void expanded_convert(); - void find_eoi(); - inline uint get_char(); - inline uint get_char(bool *pPadding_flag); - inline void stuff_char(uint8 q); - inline uint8 get_octet(); - inline uint get_bits(int num_bits); - inline uint get_bits_no_markers(int numbits); - inline int huff_decode(huff_tables *pH); - inline int huff_decode(huff_tables *pH, int& extrabits); - static inline uint8 clamp(int i); - static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void 
decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - }; - -} // namespace jpgd - -#endif // JPEG_DECODER_H diff --git a/spaces/RMXK/RVC_HFF/demucs/pretrained.py b/spaces/RMXK/RVC_HFF/demucs/pretrained.py deleted file mode 100644 index 6aac5db100cc7a9084af96d2cd083f0c8fac473c..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/demucs/pretrained.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# author: adefossez - -import logging - -from diffq import DiffQuantizer -import torch.hub - -from .model import Demucs -from .tasnet import ConvTasNet -from .utils import set_state - -logger = logging.getLogger(__name__) -ROOT = "https://dl.fbaipublicfiles.com/demucs/v3.0/" - -PRETRAINED_MODELS = { - 'demucs': 'e07c671f', - 'demucs48_hq': '28a1282c', - 'demucs_extra': '3646af93', - 'demucs_quantized': '07afea75', - 'tasnet': 'beb46fac', - 'tasnet_extra': 'df3777b2', - 'demucs_unittest': '09ebc15f', -} - -SOURCES = ["drums", "bass", "other", "vocals"] - - -def get_url(name): - sig = PRETRAINED_MODELS[name] - return ROOT + name + "-" + sig[:8] + ".th" - - -def is_pretrained(name): - return name in PRETRAINED_MODELS - - -def load_pretrained(name): - if name == "demucs": - return demucs(pretrained=True) - elif name == "demucs48_hq": - return demucs(pretrained=True, hq=True, channels=48) - elif name == "demucs_extra": - return demucs(pretrained=True, extra=True) - elif name == "demucs_quantized": - return demucs(pretrained=True, quantized=True) - elif name == "demucs_unittest": - return demucs_unittest(pretrained=True) - elif name == "tasnet": - return tasnet(pretrained=True) - elif name == "tasnet_extra": - return tasnet(pretrained=True, extra=True) - else: - raise ValueError(f"Invalid pretrained name {name}") - - -def _load_state(name, model, quantizer=None): - url = get_url(name) - state = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True) - set_state(model, quantizer, state) - if quantizer: - quantizer.detach() - - -def demucs_unittest(pretrained=True): - model = Demucs(channels=4, sources=SOURCES) - if pretrained: - _load_state('demucs_unittest', model) - return model - - -def demucs(pretrained=True, extra=False, quantized=False, hq=False, channels=64): - if not pretrained and (extra or quantized or hq): - raise ValueError("if extra or quantized is True, pretrained must be True.") - model = Demucs(sources=SOURCES, channels=channels) - if pretrained: - name = 'demucs' - if channels != 64: - name += str(channels) - quantizer = None - if sum([extra, quantized, hq]) > 1: - raise ValueError("Only one of extra, quantized, hq, can be True.") - if quantized: - quantizer = DiffQuantizer(model, group_size=8, min_size=1) - name += '_quantized' - if extra: - name += '_extra' - if hq: - name += '_hq' - _load_state(name, model, quantizer) - return model - - -def tasnet(pretrained=True, extra=False): - if not pretrained and extra: - raise ValueError("if extra is True, pretrained must be True.") - model = ConvTasNet(X=10, sources=SOURCES) - if pretrained: - name = 'tasnet' - if extra: - name = 'tasnet_extra' - _load_state(name, model) - return model diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/structures.py 
b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/structures.py deleted file mode 100644 index 188e13e4829591facb23ae0e2eda84b9807cb818..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/structures.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -requests.structures -~~~~~~~~~~~~~~~~~~~ - -Data structures that power Requests. -""" - -from collections import OrderedDict - -from .compat import Mapping, MutableMapping - - -class CaseInsensitiveDict(MutableMapping): - """A case-insensitive ``dict``-like object. - - Implements all methods and operations of - ``MutableMapping`` as well as dict's ``copy``. Also - provides ``lower_items``. - - All keys are expected to be strings. The structure remembers the - case of the last key to be set, and ``iter(instance)``, - ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` - will contain case-sensitive keys. However, querying and contains - testing is case insensitive:: - - cid = CaseInsensitiveDict() - cid['Accept'] = 'application/json' - cid['aCCEPT'] == 'application/json' # True - list(cid) == ['Accept'] # True - - For example, ``headers['content-encoding']`` will return the - value of a ``'Content-Encoding'`` response header, regardless - of how the header name was originally stored. - - If the constructor, ``.update``, or equality comparison - operations are given keys that have equal ``.lower()``s, the - behavior is undefined. - """ - - def __init__(self, data=None, **kwargs): - self._store = OrderedDict() - if data is None: - data = {} - self.update(data, **kwargs) - - def __setitem__(self, key, value): - # Use the lowercased key for lookups, but store the actual - # key alongside the value. - self._store[key.lower()] = (key, value) - - def __getitem__(self, key): - return self._store[key.lower()][1] - - def __delitem__(self, key): - del self._store[key.lower()] - - def __iter__(self): - return (casedkey for casedkey, mappedvalue in self._store.values()) - - def __len__(self): - return len(self._store) - - def lower_items(self): - """Like iteritems(), but with all lowercase keys.""" - return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) - - def __eq__(self, other): - if isinstance(other, Mapping): - other = CaseInsensitiveDict(other) - else: - return NotImplemented - # Compare insensitively - return dict(self.lower_items()) == dict(other.lower_items()) - - # Copy is required - def copy(self): - return CaseInsensitiveDict(self._store.values()) - - def __repr__(self): - return str(dict(self.items())) - - -class LookupDict(dict): - """Dictionary lookup object.""" - - def __init__(self, name=None): - self.name = name - super().__init__() - - def __repr__(self): - return f"<lookup '{self.name}'>" - - def __getitem__(self, key): - # We allow fall-through here, so values default to None - - return self.__dict__.get(key, None) - - def get(self, key, default=None): - return self.__dict__.get(key, default) diff --git a/spaces/Reha2704/VToonify/vtoonify/model/stylegan/lpips/base_model.py b/spaces/Reha2704/VToonify/vtoonify/model/stylegan/lpips/base_model.py deleted file mode 100644 index 8de1d16f0c7fa52d8067139abc6e769e96d0a6a1..0000000000000000000000000000000000000000 --- a/spaces/Reha2704/VToonify/vtoonify/model/stylegan/lpips/base_model.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import numpy as np -import torch -from torch.autograd import Variable -from pdb import set_trace as st -from IPython import embed - -class BaseModel(): - def
__init__(self): - pass - - def name(self): - return 'BaseModel' - - def initialize(self, use_gpu=True, gpu_ids=[0]): - self.use_gpu = use_gpu - self.gpu_ids = gpu_ids - - def forward(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, path, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(path, save_filename) - torch.save(network.state_dict(), save_path) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print('Loading network from %s'%save_path) - network.load_state_dict(torch.load(save_path)) - - def update_learning_rate(self): - pass - - def get_image_paths(self): - return self.image_paths - - def save_done(self, flag=False): - np.save(os.path.join(self.save_dir, 'done_flag'),flag) - np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i') diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/backbones/detectors_resnext.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/backbones/detectors_resnext.py deleted file mode 100644 index 57d032fe37ed82d5ba24e761bdc014cc0ee5ac64..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/backbones/detectors_resnext.py +++ /dev/null @@ -1,122 +0,0 @@ -import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from .detectors_resnet import Bottleneck as _Bottleneck -from .detectors_resnet import DetectoRS_ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer.
- """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if self.with_sac: - self.conv2 = build_conv_layer( - self.sac, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - elif not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - -@BACKBONES.register_module() -class DetectoRS_ResNeXt(DetectoRS_ResNet): - """ResNeXt backbone for DetectoRS. - - Args: - groups (int): The number of groups in ResNeXt. - base_width (int): The base width of ResNeXt. - """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(DetectoRS_ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - return super().make_res_layer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/backbones/mobilenet_v3.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/backbones/mobilenet_v3.py deleted file mode 100644 index 16817400b4102899794fe64c9644713a4e54e2f9..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/backbones/mobilenet_v3.py +++ /dev/null @@ -1,255 +0,0 @@ -import logging - -import annotator.uniformer.mmcv as mmcv -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule, constant_init, kaiming_init -from annotator.uniformer.mmcv.cnn.bricks import Conv2dAdaptivePadding -from annotator.uniformer.mmcv.runner import load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import InvertedResidualV3 as InvertedResidual - - -@BACKBONES.register_module() -class MobileNetV3(nn.Module): - """MobileNetV3 backbone. - - This backbone is the improved implementation of `Searching for MobileNetV3 - `_. - - Args: - arch (str): Architecture of mobilnetv3, from {'small', 'large'}. 
- Default: 'small'. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - out_indices (tuple[int]): Output from which layer. - Default: (0, 1, 12). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save - some memory while slowing down the training speed. - Default: False. - """ - # Parameters to build each block: - # [kernel size, mid channels, out channels, with_se, act type, stride] - arch_settings = { - 'small': [[3, 16, 16, True, 'ReLU', 2], # block0 layer1 os=4 - [3, 72, 24, False, 'ReLU', 2], # block1 layer2 os=8 - [3, 88, 24, False, 'ReLU', 1], - [5, 96, 40, True, 'HSwish', 2], # block2 layer4 os=16 - [5, 240, 40, True, 'HSwish', 1], - [5, 240, 40, True, 'HSwish', 1], - [5, 120, 48, True, 'HSwish', 1], # block3 layer7 os=16 - [5, 144, 48, True, 'HSwish', 1], - [5, 288, 96, True, 'HSwish', 2], # block4 layer9 os=32 - [5, 576, 96, True, 'HSwish', 1], - [5, 576, 96, True, 'HSwish', 1]], - 'large': [[3, 16, 16, False, 'ReLU', 1], # block0 layer1 os=2 - [3, 64, 24, False, 'ReLU', 2], # block1 layer2 os=4 - [3, 72, 24, False, 'ReLU', 1], - [5, 72, 40, True, 'ReLU', 2], # block2 layer4 os=8 - [5, 120, 40, True, 'ReLU', 1], - [5, 120, 40, True, 'ReLU', 1], - [3, 240, 80, False, 'HSwish', 2], # block3 layer7 os=16 - [3, 200, 80, False, 'HSwish', 1], - [3, 184, 80, False, 'HSwish', 1], - [3, 184, 80, False, 'HSwish', 1], - [3, 480, 112, True, 'HSwish', 1], # block4 layer11 os=16 - [3, 672, 112, True, 'HSwish', 1], - [5, 672, 160, True, 'HSwish', 2], # block5 layer13 os=32 - [5, 960, 160, True, 'HSwish', 1], - [5, 960, 160, True, 'HSwish', 1]] - } # yapf: disable - - def __init__(self, - arch='small', - conv_cfg=None, - norm_cfg=dict(type='BN'), - out_indices=(0, 1, 12), - frozen_stages=-1, - reduction_factor=1, - norm_eval=False, - with_cp=False): - super(MobileNetV3, self).__init__() - assert arch in self.arch_settings - assert isinstance(reduction_factor, int) and reduction_factor > 0 - assert mmcv.is_tuple_of(out_indices, int) - for index in out_indices: - if index not in range(0, len(self.arch_settings[arch]) + 2): - raise ValueError( - 'the item in out_indices must in ' - f'range(0, {len(self.arch_settings[arch])+2}). ' - f'But received {index}') - - if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): - raise ValueError('frozen_stages must be in range(-1, ' - f'{len(self.arch_settings[arch])+2}). 
' - f'But received {frozen_stages}') - self.arch = arch - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.reduction_factor = reduction_factor - self.norm_eval = norm_eval - self.with_cp = with_cp - self.layers = self._make_layer() - - def _make_layer(self): - layers = [] - - # build the first layer (layer0) - in_channels = 16 - layer = ConvModule( - in_channels=3, - out_channels=in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=dict(type='Conv2dAdaptivePadding'), - norm_cfg=self.norm_cfg, - act_cfg=dict(type='HSwish')) - self.add_module('layer0', layer) - layers.append('layer0') - - layer_setting = self.arch_settings[self.arch] - for i, params in enumerate(layer_setting): - (kernel_size, mid_channels, out_channels, with_se, act, - stride) = params - - if self.arch == 'large' and i >= 12 or self.arch == 'small' and \ - i >= 8: - mid_channels = mid_channels // self.reduction_factor - out_channels = out_channels // self.reduction_factor - - if with_se: - se_cfg = dict( - channels=mid_channels, - ratio=4, - act_cfg=(dict(type='ReLU'), - dict(type='HSigmoid', bias=3.0, divisor=6.0))) - else: - se_cfg = None - - layer = InvertedResidual( - in_channels=in_channels, - out_channels=out_channels, - mid_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - se_cfg=se_cfg, - with_expand_conv=(in_channels != mid_channels), - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type=act), - with_cp=self.with_cp) - in_channels = out_channels - layer_name = 'layer{}'.format(i + 1) - self.add_module(layer_name, layer) - layers.append(layer_name) - - # build the last layer - # block5 layer12 os=32 for small model - # block6 layer16 os=32 for large model - layer = ConvModule( - in_channels=in_channels, - out_channels=576 if self.arch == 'small' else 960, - kernel_size=1, - stride=1, - dilation=4, - padding=0, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=dict(type='HSwish')) - layer_name = 'layer{}'.format(len(layer_setting) + 1) - self.add_module(layer_name, layer) - layers.append(layer_name) - - # next, convert backbone MobileNetV3 to a semantic segmentation version - if self.arch == 'small': - self.layer4.depthwise_conv.conv.stride = (1, 1) - self.layer9.depthwise_conv.conv.stride = (1, 1) - for i in range(4, len(layers)): - layer = getattr(self, layers[i]) - if isinstance(layer, InvertedResidual): - modified_module = layer.depthwise_conv.conv - else: - modified_module = layer.conv - - if i < 9: - modified_module.dilation = (2, 2) - pad = 2 - else: - modified_module.dilation = (4, 4) - pad = 4 - - if not isinstance(modified_module, Conv2dAdaptivePadding): - # Adjust padding - pad *= (modified_module.kernel_size[0] - 1) // 2 - modified_module.padding = (pad, pad) - else: - self.layer7.depthwise_conv.conv.stride = (1, 1) - self.layer13.depthwise_conv.conv.stride = (1, 1) - for i in range(7, len(layers)): - layer = getattr(self, layers[i]) - if isinstance(layer, InvertedResidual): - modified_module = layer.depthwise_conv.conv - else: - modified_module = layer.conv - - if i < 13: - modified_module.dilation = (2, 2) - pad = 2 - else: - modified_module.dilation = (4, 4) - pad = 4 - - if not isinstance(modified_module, Conv2dAdaptivePadding): - # Adjust padding - pad *= (modified_module.kernel_size[0] - 1) // 2 - modified_module.padding = (pad, pad) - - return layers - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() 
- load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.BatchNorm2d): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - return outs - - def _freeze_stages(self): - for i in range(self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(MobileNetV3, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() diff --git a/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/emotion/test_emotion.py b/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/emotion/test_emotion.py deleted file mode 100644 index 97d702e2b426e0e9e8fb821a8f91183cf7631224..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/emotion/test_emotion.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -""" -Run inference for pre-processed data with a trained model. -""" - -import logging -import math -import numpy, math, pdb, sys, random -import time, os, itertools, shutil, importlib -import argparse -import os -import sys -import glob -from sklearn import metrics -import soundfile as sf -#import sentencepiece as spm -import torch -import inference as encoder -import torch.nn as nn -import torch.nn.functional as F -from pathlib import Path -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -from resemblyzer import VoiceEncoder, preprocess_wav - - -def tuneThresholdfromScore(scores, labels, target_fa, target_fr=None): - fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1) - fnr = 1 - tpr - - fnr = fnr * 100 - fpr = fpr * 100 - - tunedThreshold = []; - if target_fr: - for tfr in target_fr: - idx = numpy.nanargmin(numpy.absolute((tfr - fnr))) - tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]); - - for tfa in target_fa: - idx = numpy.nanargmin(numpy.absolute((tfa - fpr))) # numpy.where(fpr<=tfa)[0][-1] - tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]); - - idxE = numpy.nanargmin(numpy.absolute((fnr - fpr))) - eer = max(fpr[idxE], fnr[idxE]) - - return (tunedThreshold, eer, fpr, fnr); - - -def loadWAV(filename, max_frames, evalmode=True, num_eval=10): - # Maximum audio length - max_audio = max_frames * 160 + 240 - - # Read wav file and convert to torch tensor - audio,sample_rate = sf.read(filename) - - feats_v0 = torch.from_numpy(audio).float() - audiosize = audio.shape[0] - - if audiosize <= max_audio: - shortage = math.floor((max_audio - audiosize + 1) / 2) - audio = numpy.pad(audio, (shortage, shortage), 'constant', constant_values=0) - audiosize = audio.shape[0] - - if evalmode: - startframe = numpy.linspace(0, audiosize - max_audio, num=num_eval) - else: - startframe = numpy.array([numpy.int64(random.random() * (audiosize - max_audio))]) - feats = [] - if evalmode and max_frames == 0: - feats.append(audio) - else: - for asf in startframe: - feats.append(audio[int(asf):int(asf) + max_audio]) - feat = 
numpy.stack(feats, axis=0) - feat = torch.FloatTensor(feat) - return feat; - -def evaluateFromList(listfilename, print_interval=100, test_path='', multi=False): - - lines = [] - files = [] - feats = {} - tstart = time.time() - - ## Read all lines - with open(listfilename) as listfile: - while True: - line = listfile.readline(); - if (not line): - break; - - data = line.split(); - - ## Append random label if missing - if len(data) == 2: data = [random.randint(0,1)] + data - - files.append(data[1]) - files.append(data[2]) - lines.append(line) - - setfiles = list(set(files)) - setfiles.sort() - ## Save all features to file - for idx, file in enumerate(setfiles): - # preprocessed_wav = encoder.preprocess_wav(os.path.join(test_path,file)) - # embed = encoder.embed_utterance(preprocessed_wav) - processed_wav = preprocess_wav(os.path.join(test_path,file)) - embed = voice_encoder.embed_utterance(processed_wav) - - torch.cuda.empty_cache() - ref_feat = torch.from_numpy(embed).unsqueeze(0) - - feats[file] = ref_feat - - telapsed = time.time() - tstart - - if idx % print_interval == 0: - sys.stdout.write("\rReading %d of %d: %.2f Hz, embedding size %d"%(idx,len(setfiles),idx/telapsed,ref_feat.size()[1])); - - print('') - all_scores = []; - all_labels = []; - all_trials = []; - tstart = time.time() - - ## Read files and compute all scores - for idx, line in enumerate(lines): - - data = line.split(); - ## Append random label if missing - if len(data) == 2: data = [random.randint(0,1)] + data - - ref_feat = feats[data[1]] - com_feat = feats[data[2]] - ref_feat = ref_feat.cuda() - com_feat = com_feat.cuda() - # normalize feats - ref_feat = F.normalize(ref_feat, p=2, dim=1) - com_feat = F.normalize(com_feat, p=2, dim=1) - - dist = F.pairwise_distance(ref_feat.unsqueeze(-1), com_feat.unsqueeze(-1)).detach().cpu().numpy(); - - score = -1 * numpy.mean(dist); - - all_scores.append(score); - all_labels.append(int(data[0])); - all_trials.append(data[1]+" "+data[2]) - - if idx % print_interval == 0: - telapsed = time.time() - tstart - sys.stdout.write("\rComputing %d of %d: %.2f Hz"%(idx,len(lines),idx/telapsed)); - sys.stdout.flush(); - - print('\n') - - return (all_scores, all_labels, all_trials); - - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser("baseline") - parser.add_argument("--data_root", type=str, help="", required=True) - parser.add_argument("--list", type=str, help="", required=True) - parser.add_argument("--model_dir", type=str, help="model parameters for AudioEncoder", required=True) - - args = parser.parse_args() - - - # Load the models one by one. 
- print("Preparing the encoder...") - # encoder.load_model(Path(args.model_dir)) - print("Insert the wav file name...") - voice_encoder = VoiceEncoder().cuda() - - sc, lab, trials = evaluateFromList(args.list, print_interval=100, test_path=args.data_root) - result = tuneThresholdfromScore(sc, lab, [1, 0.1]); - print('EER %2.4f'%result[1]) diff --git a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/models.py b/spaces/SQSora/VITS-Umamusume-voice-synthesizer/models.py deleted file mode 100644 index 7dcd22edf811b952514080f5f06cc43d635ead28..0000000000000000000000000000000000000000 --- a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/models.py +++ /dev/null @@ -1,542 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, 
reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab!=0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emotion_emb = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab!=0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - x = x + self.emotion_emb(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in 
range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - 
norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - 
self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, emotion_embedding=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 
1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/Selim321/image2image-stable-diffusion/README.md b/spaces/Selim321/image2image-stable-diffusion/README.md deleted file mode 100644 index d66e55f8faf576b61243c34e7a47058594a300b0..0000000000000000000000000000000000000000 --- a/spaces/Selim321/image2image-stable-diffusion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image2image Stable Diffusion -emoji: 😻 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ServerX/PorcoDiaz/lib/infer_pack/commons.py b/spaces/ServerX/PorcoDiaz/lib/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/lib/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if 
x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/Smols/Ilinalta/Dockerfile b/spaces/Smols/Ilinalta/Dockerfile deleted file mode 100644 index 
eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/Smols/Ilinalta/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/console.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/console.py deleted file mode 100644 index 65571a7572df6b372e8971d95367392801149151..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/console.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Shim to maintain backwards compatibility with old IPython.terminal.console imports. -""" -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import sys -from warnings import warn - -from IPython.utils.shimmodule import ShimModule, ShimWarning - -warn("The `IPython.terminal.console` package has been deprecated since IPython 4.0. " - "You should import from jupyter_console instead.", ShimWarning) - -# Unconditionally insert the shim into sys.modules so that further import calls -# trigger the custom attribute access above - -sys.modules['IPython.terminal.console'] = ShimModule( - src='IPython.terminal.console', mirror='jupyter_console') diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_custom_frames.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_custom_frames.py deleted file mode 100644 index 66e400fbf7050aa5d815fe534b56f054d584062a..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_custom_frames.py +++ /dev/null @@ -1,116 +0,0 @@ -from _pydevd_bundle.pydevd_constants import get_current_thread_id, Null, ForkSafeLock -from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame -from _pydev_bundle._pydev_saved_modules import thread, threading -import sys -from _pydev_bundle import pydev_log - -DEBUG = False - - -class CustomFramesContainer: - - # Actual Values initialized later on. - custom_frames_lock = None # : :type custom_frames_lock: threading.Lock - - custom_frames = None - - _next_frame_id = None - - _py_db_command_thread_event = None - - -def custom_frames_container_init(): # Note: no staticmethod on jython 2.1 (so, use free-function) - - CustomFramesContainer.custom_frames_lock = ForkSafeLock() - - # custom_frames can only be accessed if properly locked with custom_frames_lock! - # Key is a string identifying the frame (as well as the thread it belongs to). - # Value is a CustomFrame. - # - CustomFramesContainer.custom_frames = {} - - # Only to be used in this module - CustomFramesContainer._next_frame_id = 0 - - # This is the event we must set to release an internal process events. It's later set by the actual debugger - # when we do create the debugger. - CustomFramesContainer._py_db_command_thread_event = Null() - - -# Initialize it the first time (it may be reinitialized later on when dealing with a fork). 
-custom_frames_container_init() - - -class CustomFrame: - - def __init__(self, name, frame, thread_id): - # 0 = string with the representation of that frame - self.name = name - - # 1 = the frame to show - self.frame = frame - - # 2 = an integer identifying the last time the frame was changed. - self.mod_time = 0 - - # 3 = the thread id of the given frame - self.thread_id = thread_id - - -def add_custom_frame(frame, name, thread_id): - ''' - It's possible to show paused frames by adding a custom frame through this API (it's - intended to be used for coroutines, but could potentially be used for generators too). - - :param frame: - The topmost frame to be shown paused when a thread with thread.ident == thread_id is paused. - - :param name: - The name to be shown for the custom thread in the UI. - - :param thread_id: - The thread id to which this frame is related (must match thread.ident). - - :return: str - Returns the custom thread id which will be used to show the given frame paused. - ''' - with CustomFramesContainer.custom_frames_lock: - curr_thread_id = get_current_thread_id(threading.current_thread()) - next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1 - - # Note: the frame id kept contains an id and thread information on the thread where the frame was added - # so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id). - frame_custom_thread_id = '__frame__:%s|%s' % (next_id, curr_thread_id) - if DEBUG: - sys.stderr.write('add_custom_frame: %s (%s) %s %s\n' % ( - frame_custom_thread_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name)) - - CustomFramesContainer.custom_frames[frame_custom_thread_id] = CustomFrame(name, frame, thread_id) - CustomFramesContainer._py_db_command_thread_event.set() - return frame_custom_thread_id - - -def update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None): - with CustomFramesContainer.custom_frames_lock: - if DEBUG: - sys.stderr.write('update_custom_frame: %s\n' % frame_custom_thread_id) - try: - old = CustomFramesContainer.custom_frames[frame_custom_thread_id] - if name is not None: - old.name = name - old.mod_time += 1 - old.thread_id = thread_id - except: - sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_custom_thread_id,)) - pydev_log.exception() - - CustomFramesContainer._py_db_command_thread_event.set() - - -def remove_custom_frame(frame_custom_thread_id): - with CustomFramesContainer.custom_frames_lock: - if DEBUG: - sys.stderr.write('remove_custom_frame: %s\n' % frame_custom_thread_id) - CustomFramesContainer.custom_frames.pop(frame_custom_thread_id, None) - CustomFramesContainer._py_db_command_thread_event.set() - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py deleted file mode 100644 index 507e73be2481c064a04777f28cadb48cc7177f70..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py +++ /dev/null @@ -1,433 +0,0 @@ -""" -Based on the python xreload. - -Changes -====================== - -1. we don't recreate the old namespace from new classes. Rather, we keep the existing namespace, -load a new version of it and update only some of the things we can inplace. 
That way, we don't break -things such as singletons or end up with a second representation of the same class in memory. - -2. If we find it to be a __metaclass__, we try to update it as a regular class. - -3. We don't remove old attributes (and leave them lying around even if they're no longer used). - -4. Reload hooks were changed - -These changes make it more stable, especially in the common case (where in a debug session only the -contents of a function are changed), besides providing flexibility for users that want to extend -on it. - - - -Hooks -====================== - -Classes/modules can be specially crafted to work with the reload (so that it can, for instance, -update some constant which was changed). - -1. To participate in the change of some attribute: - - In a module: - - __xreload_old_new__(namespace, name, old, new) - - in a class: - - @classmethod - __xreload_old_new__(cls, name, old, new) - - A class or module may include a method called '__xreload_old_new__' which is called when we're - unable to reload a given attribute. - - - -2. To do something after the whole reload is finished: - - In a module: - - __xreload_after_reload_update__(namespace): - - In a class: - - @classmethod - __xreload_after_reload_update__(cls): - - - A class or module may include a method called '__xreload_after_reload_update__' which is called - after the reload finishes. - - -Important: when providing a hook, always use the namespace or cls provided and not anything in the global -namespace, as the global namespace are only temporarily created during the reload and may not reflect the -actual application state (while the cls and namespace passed are). - - -Current limitations -====================== - - -- Attributes/constants are added, but not changed (so singletons and the application state is not - broken -- use provided hooks to workaround it). - -- Code using metaclasses may not always work. - -- Functions and methods using decorators (other than classmethod and staticmethod) are not handled - correctly. - -- Renamings are not handled correctly. - -- Dependent modules are not reloaded. - -- New __slots__ can't be added to existing classes. - - -Info -====================== - -Original: http://svn.python.org/projects/sandbox/trunk/xreload/xreload.py -Note: it seems https://github.com/plone/plone.reload/blob/master/plone/reload/xreload.py enhances it (to check later) - -Interesting alternative: https://code.google.com/p/reimport/ - -Alternative to reload(). - -This works by executing the module in a scratch namespace, and then patching classes, methods and -functions in place. This avoids the need to patch instances. New objects are copied into the -target namespace. 
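-
-Example
-======================
-
-A sketch (not part of the original module) of code a module could include to
-cooperate with the reload, using the two hook signatures described above;
-CONFIG and RELOAD_COUNT are made-up names:
-
-    CONFIG = {'retries': 3}
-
-    def __xreload_old_new__(namespace, name, old, new):
-        # Called for an attribute the reload could not update in place.
-        if name == 'CONFIG':
-            old.update(new)  # merge the new constants into the live dict
-
-    def __xreload_after_reload_update__(namespace):
-        # Called once after the whole reload finishes.
-        namespace['RELOAD_COUNT'] = namespace.get('RELOAD_COUNT', 0) + 1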
- -""" - -from _pydev_bundle.pydev_imports import execfile -from _pydevd_bundle import pydevd_dont_trace -import types -from _pydev_bundle import pydev_log -from _pydevd_bundle.pydevd_constants import get_global_debugger - -NO_DEBUG = 0 -LEVEL1 = 1 -LEVEL2 = 2 - -DEBUG = NO_DEBUG - - -def write_err(*args): - py_db = get_global_debugger() - if py_db is not None: - new_lst = [] - for a in args: - new_lst.append(str(a)) - - msg = ' '.join(new_lst) - s = 'code reload: %s\n' % (msg,) - cmd = py_db.cmd_factory.make_io_message(s, 2) - if py_db.writer is not None: - py_db.writer.add_command(cmd) - - -def notify_info0(*args): - write_err(*args) - - -def notify_info(*args): - if DEBUG >= LEVEL1: - write_err(*args) - - -def notify_info2(*args): - if DEBUG >= LEVEL2: - write_err(*args) - - -def notify_error(*args): - write_err(*args) - - -#======================================================================================================================= -# code_objects_equal -#======================================================================================================================= -def code_objects_equal(code0, code1): - for d in dir(code0): - if d.startswith('_') or 'line' in d or d in ('replace', 'co_positions', 'co_qualname'): - continue - if getattr(code0, d) != getattr(code1, d): - return False - return True - - -#======================================================================================================================= -# xreload -#======================================================================================================================= -def xreload(mod): - """Reload a module in place, updating classes, methods and functions. - - mod: a module object - - Returns a boolean indicating whether a change was done. - """ - r = Reload(mod) - r.apply() - found_change = r.found_change - r = None - pydevd_dont_trace.clear_trace_filter_cache() - return found_change - -# This isn't actually used... Initially I planned to reload variables which are immutable on the -# namespace, but this can destroy places where we're saving state, which may not be what we want, -# so, we're being conservative and giving the user hooks if he wants to do a reload. -# -# immutable_types = [int, str, float, tuple] #That should be common to all Python versions -# -# for name in 'long basestr unicode frozenset'.split(): -# try: -# immutable_types.append(__builtins__[name]) -# except: -# pass #Just ignore: not all python versions are created equal. -# immutable_types = tuple(immutable_types) - - -#======================================================================================================================= -# Reload -#======================================================================================================================= -class Reload: - - def __init__(self, mod, mod_name=None, mod_filename=None): - self.mod = mod - if mod_name: - self.mod_name = mod_name - else: - self.mod_name = mod.__name__ if mod is not None else None - - if mod_filename: - self.mod_filename = mod_filename - else: - self.mod_filename = mod.__file__ if mod is not None else None - - self.found_change = False - - def apply(self): - mod = self.mod - self._on_finish_callbacks = [] - try: - # Get the module namespace (dict) early; this is part of the type check - modns = mod.__dict__ - - # Execute the code. We copy the module dict to a temporary; then - # clear the module dict; then execute the new code in the module - # dict; then swap things back and around. 
This trick (due to - # Glyph Lefkowitz) ensures that the (readonly) __globals__ - # attribute of methods and functions is set to the correct dict - # object. - new_namespace = modns.copy() - new_namespace.clear() - if self.mod_filename: - new_namespace["__file__"] = self.mod_filename - try: - new_namespace["__builtins__"] = __builtins__ - except NameError: - raise # Ok if not there. - - if self.mod_name: - new_namespace["__name__"] = self.mod_name - if new_namespace["__name__"] == '__main__': - # We do this because usually the __main__ starts-up the program, guarded by - # the if __name__ == '__main__', but we don't want to start the program again - # on a reload. - new_namespace["__name__"] = '__main_reloaded__' - - execfile(self.mod_filename, new_namespace, new_namespace) - # Now we get to the hard part - oldnames = set(modns) - newnames = set(new_namespace) - - # Create new tokens (note: not deleting existing) - for name in newnames - oldnames: - notify_info0('Added:', name, 'to namespace') - self.found_change = True - modns[name] = new_namespace[name] - - # Update in-place what we can - for name in oldnames & newnames: - self._update(modns, name, modns[name], new_namespace[name]) - - self._handle_namespace(modns) - - for c in self._on_finish_callbacks: - c() - del self._on_finish_callbacks[:] - except: - pydev_log.exception() - - def _handle_namespace(self, namespace, is_class_namespace=False): - on_finish = None - if is_class_namespace: - xreload_after_update = getattr(namespace, '__xreload_after_reload_update__', None) - if xreload_after_update is not None: - self.found_change = True - on_finish = lambda: xreload_after_update() - - elif '__xreload_after_reload_update__' in namespace: - xreload_after_update = namespace['__xreload_after_reload_update__'] - self.found_change = True - on_finish = lambda: xreload_after_update(namespace) - - if on_finish is not None: - # If a client wants to know about it, give him a chance. - self._on_finish_callbacks.append(on_finish) - - def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False): - """Update oldobj, if possible in place, with newobj. - - If oldobj is immutable, this simply returns newobj. - - Args: - oldobj: the object to be updated - newobj: the object used as the source for the update - """ - try: - notify_info2('Updating: ', oldobj) - if oldobj is newobj: - # Probably something imported - return - - if type(oldobj) is not type(newobj): - # Cop-out: if the type changed, give up - if name not in ('__builtins__',): - notify_error('Type of: %s (old: %s != new: %s) changed... Skipping.' % (name, type(oldobj), type(newobj))) - return - - if isinstance(newobj, types.FunctionType): - self._update_function(oldobj, newobj) - return - - if isinstance(newobj, types.MethodType): - self._update_method(oldobj, newobj) - return - - if isinstance(newobj, classmethod): - self._update_classmethod(oldobj, newobj) - return - - if isinstance(newobj, staticmethod): - self._update_staticmethod(oldobj, newobj) - return - - if hasattr(types, 'ClassType'): - classtype = (types.ClassType, type) # object is not instance of types.ClassType. - else: - classtype = type - - if isinstance(newobj, classtype): - self._update_class(oldobj, newobj) - return - - # New: dealing with metaclasses. 
- if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__: - self._update_class(oldobj, newobj) - return - - if namespace is not None: - # Check for the `__xreload_old_new__` protocol (don't even compare things - # as even doing a comparison may break things -- see: https://github.com/microsoft/debugpy/issues/615). - xreload_old_new = None - if is_class_namespace: - xreload_old_new = getattr(namespace, '__xreload_old_new__', None) - if xreload_old_new is not None: - self.found_change = True - xreload_old_new(name, oldobj, newobj) - - elif '__xreload_old_new__' in namespace: - xreload_old_new = namespace['__xreload_old_new__'] - xreload_old_new(namespace, name, oldobj, newobj) - self.found_change = True - - # Too much information to the user... - # else: - # notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,)) - - except: - notify_error('Exception found when updating %s. Proceeding for other items.' % (name,)) - pydev_log.exception() - - # All of the following functions have the same signature as _update() - - def _update_function(self, oldfunc, newfunc): - """Update a function object.""" - oldfunc.__doc__ = newfunc.__doc__ - oldfunc.__dict__.update(newfunc.__dict__) - - try: - newfunc.__code__ - attr_name = '__code__' - except AttributeError: - newfunc.func_code - attr_name = 'func_code' - - old_code = getattr(oldfunc, attr_name) - new_code = getattr(newfunc, attr_name) - if not code_objects_equal(old_code, new_code): - notify_info0('Updated function code:', oldfunc) - setattr(oldfunc, attr_name, new_code) - self.found_change = True - - try: - oldfunc.__defaults__ = newfunc.__defaults__ - except AttributeError: - oldfunc.func_defaults = newfunc.func_defaults - - return oldfunc - - def _update_method(self, oldmeth, newmeth): - """Update a method object.""" - # XXX What if im_func is not a function? - if hasattr(oldmeth, 'im_func') and hasattr(newmeth, 'im_func'): - self._update(None, None, oldmeth.im_func, newmeth.im_func) - elif hasattr(oldmeth, '__func__') and hasattr(newmeth, '__func__'): - self._update(None, None, oldmeth.__func__, newmeth.__func__) - return oldmeth - - def _update_class(self, oldclass, newclass): - """Update a class object.""" - olddict = oldclass.__dict__ - newdict = newclass.__dict__ - - oldnames = set(olddict) - newnames = set(newdict) - - for name in newnames - oldnames: - setattr(oldclass, name, newdict[name]) - notify_info0('Added:', name, 'to', oldclass) - self.found_change = True - - # Note: not removing old things... - # for name in oldnames - newnames: - # notify_info('Removed:', name, 'from', oldclass) - # delattr(oldclass, name) - - for name in (oldnames & newnames) - set(['__dict__', '__doc__']): - self._update(oldclass, name, olddict[name], newdict[name], is_class_namespace=True) - - old_bases = getattr(oldclass, '__bases__', None) - new_bases = getattr(newclass, '__bases__', None) - if str(old_bases) != str(new_bases): - notify_error('Changing the hierarchy of a class is not supported. %s may be inconsistent.' % (oldclass,)) - - self._handle_namespace(oldclass, is_class_namespace=True) - - def _update_classmethod(self, oldcm, newcm): - """Update a classmethod update.""" - # While we can't modify the classmethod object itself (it has no - # mutable attributes), we *can* extract the underlying function - # (by calling __get__(), which returns a method object) and update - # it in-place. 
We don't have the class available to pass to - # __get__() but any object except None will do. - self._update(None, None, oldcm.__get__(0), newcm.__get__(0)) - - def _update_staticmethod(self, oldsm, newsm): - """Update a staticmethod update.""" - # While we can't modify the staticmethod object itself (it has no - # mutable attributes), we *can* extract the underlying function - # (by calling __get__(), which returns it) and update it in-place. - # We don't have the class available to pass to __get__() but any - # object except None will do. - self._update(None, None, oldsm.__get__(0), newsm.__get__(0)) diff --git a/spaces/Superlang/ImageProcessor/annotator/midas/midas/dpt_depth.py b/spaces/Superlang/ImageProcessor/annotator/midas/midas/dpt_depth.py deleted file mode 100644 index 4e9aab5d2767dffea39da5b3f30e2798688216f1..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/midas/midas/dpt_depth.py +++ /dev/null @@ -1,109 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .base_model import BaseModel -from .blocks import ( - FeatureFusionBlock, - FeatureFusionBlock_custom, - Interpolate, - _make_encoder, - forward_vit, -) - - -def _make_fusion_block(features, use_bn): - return FeatureFusionBlock_custom( - features, - nn.ReLU(False), - deconv=False, - bn=use_bn, - expand=False, - align_corners=True, - ) - - -class DPT(BaseModel): - def __init__( - self, - head, - features=256, - backbone="vitb_rn50_384", - readout="project", - channels_last=False, - use_bn=False, - ): - - super(DPT, self).__init__() - - self.channels_last = channels_last - - hooks = { - "vitb_rn50_384": [0, 1, 8, 11], - "vitb16_384": [2, 5, 8, 11], - "vitl16_384": [5, 11, 17, 23], - } - - # Instantiate backbone and reassemble blocks - self.pretrained, self.scratch = _make_encoder( - backbone, - features, - False, # Set to true of you want to train from scratch, uses ImageNet weights - groups=1, - expand=False, - exportable=False, - hooks=hooks[backbone], - use_readout=readout, - ) - - self.scratch.refinenet1 = _make_fusion_block(features, use_bn) - self.scratch.refinenet2 = _make_fusion_block(features, use_bn) - self.scratch.refinenet3 = _make_fusion_block(features, use_bn) - self.scratch.refinenet4 = _make_fusion_block(features, use_bn) - - self.scratch.output_conv = head - - - def forward(self, x): - if self.channels_last == True: - x.contiguous(memory_format=torch.channels_last) - - layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return out - - -class DPTDepthModel(DPT): - def __init__(self, path=None, non_negative=True, **kwargs): - features = kwargs["features"] if "features" in kwargs else 256 - - head = nn.Sequential( - nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), - Interpolate(scale_factor=2, mode="bilinear", align_corners=True), - nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), - nn.ReLU(True), - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - nn.Identity(), - ) - - 
super().__init__(head, **kwargs) - - if path is not None: - self.load(path) - - def forward(self, x): - return super().forward(x).squeeze(dim=1) - diff --git a/spaces/TEnngal/bingo/src/components/ui/textarea.tsx b/spaces/TEnngal/bingo/src/components/ui/textarea.tsx deleted file mode 100644 index e25af722c7a5dc1121a9ab58d6716952f9f76081..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/ui/textarea.tsx +++ /dev/null @@ -1,24 +0,0 @@
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-export interface TextareaProps
-  extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
-
-const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
-  ({ className, ...props }, ref) => {
-    return (
-      <textarea
-        className={cn(className)}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Textarea.displayName = 'Textarea'
-
-export { Textarea }
[stripped HTML residue from another deleted page, tags lost in extraction: "Output logs", "Output will be here", "Check out the source code"]
    - - - - - diff --git a/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py b/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/hysts/mediapipe-pose-estimation/app.py b/spaces/hysts/mediapipe-pose-estimation/app.py deleted file mode 100644 index 25059fac7a0b696a50317a33100ade5a76cd528b..0000000000000000000000000000000000000000 --- a/spaces/hysts/mediapipe-pose-estimation/app.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import pathlib - -import gradio as gr -import mediapipe as mp -import numpy as np - -mp_drawing = mp.solutions.drawing_utils -mp_drawing_styles = mp.solutions.drawing_styles -mp_pose = mp.solutions.pose - -TITLE = 'MediaPipe Human Pose Estimation' -DESCRIPTION = 'https://google.github.io/mediapipe/' - - -def run(image: np.ndarray, model_complexity: int, enable_segmentation: bool, - min_detection_confidence: float, background_color: str) -> np.ndarray: - with mp_pose.Pose( - static_image_mode=True, - model_complexity=model_complexity, - enable_segmentation=enable_segmentation, - min_detection_confidence=min_detection_confidence) as pose: - results = pose.process(image) - - res = image[:, :, ::-1].copy() - if enable_segmentation: - if background_color == 'white': - bg_color = 255 - elif background_color == 'black': - bg_color = 0 - elif background_color == 'green': - bg_color = (0, 255, 0) # type: ignore - else: - raise ValueError - - if results.segmentation_mask is not None: - res[results.segmentation_mask <= 0.1] = bg_color - else: - res[:] = bg_color - - mp_drawing.draw_landmarks(res, - results.pose_landmarks, - mp_pose.POSE_CONNECTIONS, - landmark_drawing_spec=mp_drawing_styles. 
- get_default_pose_landmarks_style()) - - return res[:, :, ::-1] - - -model_complexities = list(range(3)) -background_colors = ['white', 'black', 'green'] - -image_paths = sorted(pathlib.Path('images').rglob('*.jpg')) -examples = [[path, model_complexities[1], True, 0.5, background_colors[0]] - for path in image_paths] - -gr.Interface( - fn=run, - inputs=[ - gr.Image(label='Input', type='numpy'), - gr.Radio(label='Model Complexity', - choices=model_complexities, - type='index', - value=model_complexities[1]), - gr.Checkbox(label='Enable Segmentation', value=True), - gr.Slider(label='Minimum Detection Confidence', - minimum=0, - maximum=1, - step=0.05, - value=0.5), - gr.Radio(label='Background Color', - choices=background_colors, - type='value', - value=background_colors[0]), - ], - outputs=gr.Image(label='Output', height=500), - examples=examples, - title=TITLE, - description=DESCRIPTION, -).queue().launch() diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/dist.sh b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/dist.sh deleted file mode 100644 index 9f3c6a5276a030652c9f2e81d535e0beb854f123..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/dist.sh +++ /dev/null @@ -1,15 +0,0 @@ -ip_list=("ip1" "ip2" "ip3" "ip4") - -config=wf42m_pfc03_32gpu_r100 - -for((node_rank=0;node_rank<${#ip_list[*]};node_rank++)); -do - ssh ubuntu@${ip_list[node_rank]} "cd `pwd`;PATH=$PATH \ - CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ - torchrun \ - --nproc_per_node=8 \ - --nnodes=${#ip_list[*]} \ - --node_rank=$node_rank \ - --master_addr=${ip_list[0]} \ - --master_port=22345 train.py configs/$config" & -done diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py deleted file mode 100644 index c6d3b9c240c24687d432197f976ee01fbf423216..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py +++ /dev/null @@ -1,187 +0,0 @@ -import torch -from torch import nn - -__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200'] - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=1, - stride=stride, - bias=False) - - -class IBasicBlock(nn.Module): - expansion = 1 - def __init__(self, inplanes, planes, stride=1, downsample=None, - groups=1, base_width=64, dilation=1): - super(IBasicBlock, self).__init__() - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,) - self.conv1 = conv3x3(inplanes, planes) - self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,) - self.prelu = nn.PReLU(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - out = self.bn1(x) - out = self.conv1(out) - out = 
self.bn2(out) - out = self.prelu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - identity = self.downsample(x) - out += identity - return out - - -class IResNet(nn.Module): - fc_scale = 7 * 7 - def __init__(self, - block, layers, dropout=0, num_features=512, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): - super(IResNet, self).__init__() - self.fp16 = fp16 - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) - self.prelu = nn.PReLU(self.inplanes) - self.layer1 = self._make_layer(block, 64, layers[0], stride=2) - self.layer2 = self._make_layer(block, - 128, - layers[1], - stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, - 256, - layers[2], - stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, - 512, - layers[3], - stride=2, - dilate=replace_stride_with_dilation[2]) - self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,) - self.dropout = nn.Dropout(p=dropout, inplace=True) - self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) - self.features = nn.BatchNorm1d(num_features, eps=1e-05) - nn.init.constant_(self.features.weight, 1.0) - self.features.weight.requires_grad = False - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, 0, 0.1) - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - if zero_init_residual: - for m in self.modules(): - if isinstance(m, IBasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), - ) - layers = [] - layers.append( - block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation)) - - return nn.Sequential(*layers) - - def forward(self, x): - with torch.cuda.amp.autocast(self.fp16): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.bn2(x) - x = torch.flatten(x, 1) - x = self.dropout(x) - x = self.fc(x.float() if self.fp16 else x) - x = self.features(x) - return x - - -def _iresnet(arch, block, layers, pretrained, progress, **kwargs): - model = IResNet(block, layers, **kwargs) - if pretrained: - raise ValueError() - return model - - -def iresnet18(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained, - progress, **kwargs) - - 
-def iresnet34(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained, - progress, **kwargs) - - -def iresnet50(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained, - progress, **kwargs) - - -def iresnet100(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained, - progress, **kwargs) - - -def iresnet200(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained, - progress, **kwargs) - diff --git a/spaces/iamironman4279/SadTalker/src/face3d/util/util.py b/spaces/iamironman4279/SadTalker/src/face3d/util/util.py deleted file mode 100644 index 0d689ca138fc0fbf5bec794511ea0f9e638f9ea9..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/util/util.py +++ /dev/null @@ -1,208 +0,0 @@ -"""This script contains basic utilities for Deep3DFaceRecon_pytorch -""" -from __future__ import print_function -import numpy as np -import torch -from PIL import Image -import os -import importlib -import argparse -from argparse import Namespace -import torchvision - - -def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -def copyconf(default_opt, **kwargs): - conf = Namespace(**vars(default_opt)) - for key in kwargs: - setattr(conf, key, kwargs[key]) - return conf - -def genvalconf(train_opt, **kwargs): - conf = Namespace(**vars(train_opt)) - attr_dict = train_opt.__dict__ - for key, value in attr_dict.items(): - if 'val' in key and key.split('_')[0] in attr_dict: - setattr(conf, key.split('_')[0], value) - - for key in kwargs: - setattr(conf, key, kwargs[key]) - - return conf - -def find_class_in_module(target_cls_name, module): - target_cls_name = target_cls_name.replace('_', '').lower() - clslib = importlib.import_module(module) - cls = None - for name, clsobj in clslib.__dict__.items(): - if name.lower() == target_cls_name: - cls = clsobj - - assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name) - - return cls - - -def tensor2im(input_image, imtype=np.uint8): - """"Converts a Tensor array into a numpy image array. 
- - Parameters: - input_image (tensor) -- the input image tensor array, range(0, 1) - imtype (type) -- the desired type of the converted numpy array - """ - if not isinstance(input_image, np.ndarray): - if isinstance(input_image, torch.Tensor): # get the data from a variable - image_tensor = input_image.data - else: - return input_image - image_numpy = image_tensor.clamp(0.0, 1.0).cpu().float().numpy() # convert it into a numpy array - if image_numpy.shape[0] == 1: # grayscale to RGB - image_numpy = np.tile(image_numpy, (3, 1, 1)) - image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 # post-processing: tranpose and scaling - else: # if it is a numpy array, do nothing - image_numpy = input_image - return image_numpy.astype(imtype) - - -def diagnose_network(net, name='network'): - """Calculate and print the mean of average absolute(gradients) - - Parameters: - net (torch network) -- Torch network - name (str) -- the name of the network - """ - mean = 0.0 - count = 0 - for param in net.parameters(): - if param.grad is not None: - mean += torch.mean(torch.abs(param.grad.data)) - count += 1 - if count > 0: - mean = mean / count - print(name) - print(mean) - - -def save_image(image_numpy, image_path, aspect_ratio=1.0): - """Save a numpy image to the disk - - Parameters: - image_numpy (numpy array) -- input numpy array - image_path (str) -- the path of the image - """ - - image_pil = Image.fromarray(image_numpy) - h, w, _ = image_numpy.shape - - if aspect_ratio is None: - pass - elif aspect_ratio > 1.0: - image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) - elif aspect_ratio < 1.0: - image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) - image_pil.save(image_path) - - -def print_numpy(x, val=True, shp=False): - """Print the mean, min, max, median, std, and size of a numpy array - - Parameters: - val (bool) -- if print the values of the numpy array - shp (bool) -- if print the shape of the numpy array - """ - x = x.astype(np.float64) - if shp: - print('shape,', x.shape) - if val: - x = x.flatten() - print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( - np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) - - -def mkdirs(paths): - """create empty directories if they don't exist - - Parameters: - paths (str list) -- a list of directory paths - """ - if isinstance(paths, list) and not isinstance(paths, str): - for path in paths: - mkdir(path) - else: - mkdir(paths) - - -def mkdir(path): - """create a single empty directory if it didn't exist - - Parameters: - path (str) -- a single directory path - """ - if not os.path.exists(path): - os.makedirs(path) - - -def correct_resize_label(t, size): - device = t.device - t = t.detach().cpu() - resized = [] - for i in range(t.size(0)): - one_t = t[i, :1] - one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0)) - one_np = one_np[:, :, 0] - one_image = Image.fromarray(one_np).resize(size, Image.NEAREST) - resized_t = torch.from_numpy(np.array(one_image)).long() - resized.append(resized_t) - return torch.stack(resized, dim=0).to(device) - - -def correct_resize(t, size, mode=Image.BICUBIC): - device = t.device - t = t.detach().cpu() - resized = [] - for i in range(t.size(0)): - one_t = t[i:i + 1] - one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC) - resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0 - resized.append(resized_t) - return torch.stack(resized, dim=0).to(device) - -def draw_landmarks(img, landmark, color='r', 
step=2): - """ - Return: - img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255) - - - Parameters: - img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255) - landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction - color -- str, 'r' or 'b' (red or blue) - """ - if color =='r': - c = np.array([255., 0, 0]) - else: - c = np.array([0, 0, 255.]) - - _, H, W, _ = img.shape - img, landmark = img.copy(), landmark.copy() - landmark[..., 1] = H - 1 - landmark[..., 1] - landmark = np.round(landmark).astype(np.int32) - for i in range(landmark.shape[1]): - x, y = landmark[:, i, 0], landmark[:, i, 1] - for j in range(-step, step): - for k in range(-step, step): - u = np.clip(x + j, 0, W - 1) - v = np.clip(y + k, 0, H - 1) - for m in range(landmark.shape[0]): - img[m, v[m], u[m]] = c - return img diff --git a/spaces/inamXcontru/PoeticTTS/Borland Jbuilder 3 University Edition Of Office A Comprehensive Guide for Java Developers.md b/spaces/inamXcontru/PoeticTTS/Borland Jbuilder 3 University Edition Of Office A Comprehensive Guide for Java Developers.md deleted file mode 100644 index f9457064cd8d95bc12bc8ba437530a48b8b61771..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Borland Jbuilder 3 University Edition Of Office A Comprehensive Guide for Java Developers.md +++ /dev/null @@ -1,7 +0,0 @@ - -

Package your text with other student resources. Borland Delphi 7 Personal Edition is now available; Borland accelerates rapid application development. Versions of Borland Kylix Open Edition and JBuilder Personal. JBuilder 5 includes a Tomcat 3.2 plugin; the Enterprise edition also has Borland and WebLogic support. Step 3: check email. Includes JBuilder 2 University Edition and JBuilder 2 Professional 30-day. Borland JBuilder was spun off with CodeGear. By Brent Thompson. JBuilder 2005 will ship in three editions in September: Borland JBuilder. A 30 . Convert Borland C++Builder 6 Personal Edition trial version to full software. This is a screenshot of Borland C++Builder 6 Personal Edition running on Ubuntu 6.06 Dapper in CrossOver, with shlwapi.dll replaced by the Wine shlwapi.dll. Web editor Lamont Adams offers a rundown of JBuilder 6 features. JBuilder 6: brewing Java the Borland way. By Michael Landy. Look at the most relevant Borland JBuilder 9 Enterprise Edition websites out of 5.29 thousand at KeyOptimize. Borland. Borland JBuilder Personal by Borland. JBuilder 6 Personal, CD. Kodo JDO provides integration into JBuilder 7 and higher in the form of a JBuilder OpenTool. Borland has managed to integrate multiple. It is a full offline installer, standalone setup of the JBuilder Java compiler IDE for both 32-bit and 64-bit versions. Borland C.

    -

No specific info about version 8.0. Please visit the main page of Borland JBuilder Personal on Software Informer. They later took the download off their site as newer. Share your experience: My personal preference is to use the find option. CodeGear JBuilder 2007. While the Personal edition is just a base-level tool, it gives you a chance to test-drive JBuilder and upgrade to the Pro or Enterprise version. It is a full offline installer. This will install JBuilder 6 as well as Borland Enterprise Server, AppServer Edition. Abstract: this document is a step-by-step guide to setting up and running a JBoss server instance, and debugging your EJB classes, all from within JBuilder 6 Personal Edition. Select a free download for JBuilder. We have the largest serial numbers database. JBuilder 6: brewing Java the Borland way. The Personal edition is available as a free download and. JBuilder is an integrated development environment (IDE) for the Java programming language from Embarcadero Technologies. Borland JBuilder 6 Personal free download. Hi all. Select product. That has been my. New JBuilder 9 downloads now available. I have. JBuilder is offered in three versions: Personal, Professional, and Enterprise. Abstract: this document is a step-by-step guide to setting up and running a JBoss server instance, and debugging your EJB classes, all from within JBuilder 6 Personal. Pearson offers special pricing when you.

    -

    Borland Jbuilder 3 University Edition Of Office


Download Zip https://gohhs.com/2uz51F



    -

Builder 6 Personal Edition serial numbers. Months ago, the USA Borland site offered the free version of Delphi 6 Personal Edition. JBuilder 6 free download setup ISO. Originally developed by. The integration features allow the JBuilder user to configure the Kodo. Borland JBuilder 6 Personal: Amazon.ca: Software. The new release of Borland JBuilder Enterprise Edition is available for. May, 2014. JBuilder Personal is a free edition of. Abstract: this document is a step-by-step guide to setting up and running a JBoss server. Borland C++Builder 6 Personal Edition ENG. It is a great choice if you are just starting out or. JBuilder 6:. The examples can be compiled and run under JBuilder Personal Edition, a free edition of JBuilder. Abstract: download the new JBuilder 9 Personal with more than 0 new features, or a 30-day trial of JBuilder 9 Enterprise; Borland JBuilder 9 Personal is now available for download. Has changed. Borland has introduced Borland Enterprise Studio 6 for Java, to. JBuilder is an integrated development environment. JBuilder Personal. The serial key for C++Builder 6 Personal Edition can be found and viewed here. The examples can be compiled and run under JBuilder Personal Edition. Originally developed by Borland, JBuilder was. He is also a Sun certified developer and a Borland. Borland jumped in in order to tackle this issue with its JBuilder 6 application.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/innat/HybridModel-GradCAM/layers/window_attention.py b/spaces/innat/HybridModel-GradCAM/layers/window_attention.py deleted file mode 100644 index 97d01af992a7bac963e559f2964b54118457bb90..0000000000000000000000000000000000000000 --- a/spaces/innat/HybridModel-GradCAM/layers/window_attention.py +++ /dev/null @@ -1,111 +0,0 @@ -import tensorflow as tf -from tensorflow.keras import layers - - -class WindowAttention(layers.Layer): - def __init__( - self, - dim, - window_size, - num_heads, - qkv_bias=True, - dropout_rate=0.0, - return_attention_scores=False, - **kwargs, - ): - super().__init__(**kwargs) - self.dim = dim - self.window_size = window_size - self.num_heads = num_heads - self.scale = (dim // num_heads) ** -0.5 - self.return_attention_scores = return_attention_scores - self.qkv = layers.Dense(dim * 3, use_bias=qkv_bias) - self.dropout = layers.Dropout(dropout_rate) - self.proj = layers.Dense(dim) - - def build(self, input_shape): - self.relative_position_bias_table = self.add_weight( - shape=( - (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), - self.num_heads, - ), - initializer="zeros", - trainable=True, - name="relative_position_bias_table", - ) - - self.relative_position_index = self.get_relative_position_index( - self.window_size[0], self.window_size[1] - ) - super().build(input_shape) - - def get_relative_position_index(self, window_height, window_width): - x_x, y_y = tf.meshgrid(range(window_height), range(window_width)) - coords = tf.stack([y_y, x_x], axis=0) - coords_flatten = tf.reshape(coords, [2, -1]) - - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = tf.transpose(relative_coords, perm=[1, 2, 0]) - - x_x = (relative_coords[:, :, 0] + window_height - 1) * (2 * window_width - 1) - y_y = relative_coords[:, :, 1] + window_width - 1 - relative_coords = tf.stack([x_x, y_y], axis=-1) - - return tf.reduce_sum(relative_coords, axis=-1) - - def call(self, x, mask=None): - _, size, channels = x.shape - head_dim = channels // self.num_heads - x_qkv = self.qkv(x) - x_qkv = tf.reshape(x_qkv, shape=(-1, size, 3, self.num_heads, head_dim)) - x_qkv = tf.transpose(x_qkv, perm=(2, 0, 3, 1, 4)) - q, k, v = x_qkv[0], x_qkv[1], x_qkv[2] - q = q * self.scale - k = tf.transpose(k, perm=(0, 1, 3, 2)) - attn = q @ k - - relative_position_bias = tf.gather( - self.relative_position_bias_table, - self.relative_position_index, - axis=0, - ) - relative_position_bias = tf.transpose(relative_position_bias, [2, 0, 1]) - attn = attn + tf.expand_dims(relative_position_bias, axis=0) - - if mask is not None: - nW = mask.get_shape()[0] - mask_float = tf.cast( - tf.expand_dims(tf.expand_dims(mask, axis=1), axis=0), tf.float32 - ) - attn = ( - tf.reshape(attn, shape=(-1, nW, self.num_heads, size, size)) - + mask_float - ) - attn = tf.reshape(attn, shape=(-1, self.num_heads, size, size)) - attn = tf.nn.softmax(attn, axis=-1) - else: - attn = tf.nn.softmax(attn, axis=-1) - attn = self.dropout(attn) - - x_qkv = attn @ v - x_qkv = tf.transpose(x_qkv, perm=(0, 2, 1, 3)) - x_qkv = tf.reshape(x_qkv, shape=(-1, size, channels)) - x_qkv = self.proj(x_qkv) - x_qkv = self.dropout(x_qkv) - - if self.return_attention_scores: - return x_qkv, attn - else: - return x_qkv - - def get_config(self): - config = super().get_config() - config.update( - { - "dim": self.dim, - "window_size": self.window_size, - "num_heads": self.num_heads, - "scale": self.scale, - } - ) - return config diff --git 
a/spaces/inplisQlawa/anything-midjourney-v4-1/2CAudio Aether Algorithmic Reverb VST RTAS V151 Pack AiR ASSiG.md b/spaces/inplisQlawa/anything-midjourney-v4-1/2CAudio Aether Algorithmic Reverb VST RTAS V151 Pack AiR ASSiG.md deleted file mode 100644 index 1e6bdd8597f77f8996834bb23ad3c7ed33121444..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/2CAudio Aether Algorithmic Reverb VST RTAS V151 Pack AiR ASSiG.md +++ /dev/null @@ -1,15 +0,0 @@ -

    2CAudio Aether Algorithmic Reverb VST RTAS V151 Pack AiR ASSiG


DOWNLOAD https://urlin.us/2uEvI2



    - -2CAudio Aether Algorithmic Reverb VST RTAS V151 Pack AiR ASSiG ON REAudio Aether Reverb v1.5.0 Pack. -RUB 1,738 -In stock. -Delivery and payment information. -Description, characteristics, photo ... -Aether Reverb v1.5.3 - free download Aether Reverb v1.5.3 ... -Aether Reverb v1.5.5 - free download Aether Reverb v1.5.5 ... -Aether Reverb v1.0 - free download Aether Reverb v1.0 ... -Aether Reverb v1.5.0 - free download Aether Reverb v1.5.0 ... -Aether Reverb v1.5.0 - free download Aether Reverb v1.5.0 8a78ff9644
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack ((LINK)) Para Global Mapper 14 Rar.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Crack ((LINK)) Para Global Mapper 14 Rar.md deleted file mode 100644 index 0d0aefb0850209ec9322f3f8079ecc78bfc9c97d..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Crack ((LINK)) Para Global Mapper 14 Rar.md +++ /dev/null @@ -1,115 +0,0 @@ -
    -

    Crack Para Global Mapper 14 Rar: What You Need to Know

    - -

Global Mapper is one of the most popular and powerful GIS applications on the market. It allows you to work with various types of spatial data, such as vector, raster, elevation, lidar, satellite imagery, and more. It also provides many tools and features for data processing, mapping, visualization, and geospatial analysis.

    -

    Crack Para Global Mapper 14 Rar


    Download File »»» https://urlin.us/2uEvZP



    - -

However, Global Mapper is not cheap software. You need to buy a license to use it legally and fully. The price of Global Mapper depends on the version and the features you need. For example, the latest version, Global Mapper 24, costs $549 for a single-user license.

    - -

    If you want to use Global Mapper without paying for it, you might be interested in downloading and using Crack Para Global Mapper 14 Rar. This is a file that contains a cracked version of Global Mapper 14, which is an older version of the software. By using Crack Para Global Mapper 14 Rar, you can bypass the activation process and use Global Mapper 14 for free.

    - -

    But is it a good idea? Is it safe? Is it legal? In this article, we will answer these questions and show you how to download and use Crack Para Global Mapper 14 Rar.

    - -

    What is Crack Para Global Mapper 14 Rar?

    - -

Crack Para Global Mapper 14 Rar is a file that contains a cracked version of Global Mapper 14. A cracked version of a program is a modified version that has been hacked to remove or bypass the security features that prevent unauthorized use. By using a cracked version, you can use the software without paying for it or activating it with a valid license.

    -

    - -

    Crack Para Global Mapper 14 Rar consists of two files: "Setup.exe" and "Crack.rar". The first file is the installer of Global Mapper 14, which allows you to install the software on your computer. The second file is the crack file, which contains the modified executable file of Global Mapper 14 that has been hacked to remove the activation process.

    - -

    By using Crack Para Global Mapper 14 Rar, you can install and use Global Mapper 14 for free on your computer. You do not need to enter any serial number or cd-key to activate it, as it is already registered with valid ones. You can also use it without any limitation or restriction.

    - -

    Where to Download Crack Para Global Mapper 14 Rar?

    - -

    There are many websites that claim to offer the download link for Crack Para Global Mapper 14 Rar. However, not all of them are reliable and trustworthy. Some of them might contain viruses, malware, or spyware that can damage your computer or steal your personal information. Some of them might also have broken or outdated links that do not work anymore.

    - -
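
Whatever the source, one simple precaution is to compare the archive's checksum against a value published somewhere you trust before opening it. A minimal sketch in Python (hashlib is from the standard library; the expected digest below is a placeholder, not a value from this article):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder digest -- substitute one published by a source you trust.
EXPECTED = '0' * 64
actual = sha256_of('Global_Mapper_V_14_64_Bit_Crack.rar')
print('OK' if actual == EXPECTED else 'MISMATCH - do not open this file')
```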

    Therefore, you need to be careful and choose a reputable source for downloading Crack Para Global Mapper 14 Rar. One of the best sources is Peatix.com, a website that provides free download links for various software products. You can find the download link for Crack Para Global Mapper 14 Rar on Peatix.com by following these steps:

    - -
1. Go to https://peatix.com/group/10555660
2. Scroll down to the bottom of the page and click on the "Download" button.
3. You will be redirected to another page where you need to complete a captcha verification.
4. After completing the captcha verification, you will see the download link for Crack Para Global Mapper 14 Rar.
5. Click on the download link and save the file to your computer.
6. The file name is "Global_Mapper_V_14_64_Bit_Crack.rar". It is a compressed file that contains both the installer and the crack file of Global Mapper 14.
7. Extract the file using a program like WinRAR or 7-Zip.
8. You will get a folder named "Global_Mapper_V_14_64_Bit_Crack". Inside this folder, you will find two files: "Setup.exe" and "Crack.rar".
9. You have successfully downloaded Crack Para Global Mapper 14 Rar.

    How to Install and Use Crack Para Global Mapper 14 Rar?

    - -

    Now that you have downloaded Crack Para Global Mapper 14 Rar, you can install and use it on your computer. However, you need to be aware that this is an illegal and risky way of using Global Mapper. You might face some problems or issues while using Crack Para Global Mapper 14 Rar, such as compatibility issues, errors, bugs, or crashes. You might also violate the copyright law and face legal consequences if you are caught using Crack Para Global Mapper 14 Rar.

    - -

Therefore, we do not recommend or encourage you to use Crack Para Global Mapper 14 Rar. We provide this information for educational purposes only. If you like Global Mapper and want to use it legally and fully, please support the developers and publishers by buying it from their official website or platform.

    - -

    If you still want to use Crack Para Global Mapper 14 Rar at your own risk and responsibility, you can follow these steps:

    - -
1. Go to the folder where you extracted Crack Para Global Mapper 14 Rar.
2. Double-click on the file named "Setup.exe". This will start the installation process of Global Mapper 14.
3. Follow the instructions on the screen until the installation is complete.
4. Do not run or open Global Mapper 14 yet.
5. Go back to the folder where you extracted Crack Para Global Mapper 14 Rar.
6. Extract the file named "Crack.rar" using a program like WinRAR or 7-Zip.
7. You will get a file named "global_mapper.exe". This is the cracked executable file of Global Mapper 14.
8. Copy this file and paste it into the folder where you installed Global Mapper 14 (usually C:\Program Files\GlobalMapper).
9. Replace the original file if prompted.
10. You have successfully installed and cracked Global Mapper 14.

    How to Use Crack Para Global Mapper 14 Rar?

    - -

    Now that you have installed and cracked Global Mapper 14, you can start using it on your computer. You do not need to activate it with any serial number or cd-key, as it is already registered with valid ones. You can also use it without any limitation or restriction.

    - -

    To use Crack Para Global Mapper 14 Rar, you just need to follow these steps:

    - -
1. Go to the folder where you installed Global Mapper 14 (usually C:\Program Files\GlobalMapper).
2. Run the cracked "global_mapper.exe". Global Mapper 14 will start without asking for a serial number or cd-key.

      What are the Advantages and Disadvantages of Using Crack Para Global Mapper 14 Rar?

      - -

      Using Crack Para Global Mapper 14 Rar might seem like a good idea if you want to save money and use Global Mapper for free. However, there are also some drawbacks and risks that you need to consider before using Crack Para Global Mapper 14 Rar. Here are some of the advantages and disadvantages of using Crack Para Global Mapper 14 Rar:

      - -

      Advantages:

      - -
• You can use Global Mapper 14 for free without paying for it or activating it with a valid license.
• You can use all the features and functions of Global Mapper 14 without any limitation or restriction.
• You can work with various types of spatial data and perform various geospatial tasks with Global Mapper 14.

      Disadvantages:

      - -
• You are using an illegal and pirated version of Global Mapper 14, which violates the copyright law and the terms of service of the software.
• You might face legal consequences if you are caught using Crack Para Global Mapper 14 Rar, such as fines, lawsuits, or even jail time.
• You are using an outdated and unsupported version of Global Mapper 14, which might have compatibility issues, errors, bugs, or crashes with your system or data.
• You might not be able to access the latest updates, patches, or features of Global Mapper 14, which might affect the performance and quality of your work.
• You might expose your computer or data to viruses, malware, or spyware that might be hidden in Crack Para Global Mapper 14 Rar or the websites that offer it.
• You might lose your data or damage your computer if Crack Para Global Mapper 14 Rar does not work properly or causes any problems.

      What are the Alternatives to Using Crack Para Global Mapper 14 Rar?

      - -

      If you want to use Global Mapper legally and safely, you should avoid using Crack Para Global Mapper 14 Rar and look for some alternatives. Here are some of the alternatives to using Crack Para Global Mapper 14 Rar:

      • Buy a license for Global Mapper from the official website or platform. This is the best and most recommended way to use Global Mapper legally and fully. You can choose the version and the features that suit your needs and budget, and you get technical support, customer service, updates, patches, and new features.
      • Use a trial version of Global Mapper from the official website or platform. This is a good way to test and evaluate Global Mapper before buying it. The trial runs for a limited time (usually 14 days) with full functionality, and you can access the online resources, tutorials, and forums to learn more about Global Mapper.
      • Use a free or open source GIS software that has similar features and functions as Global Mapper, such as QGIS, GRASS GIS, SAGA GIS, MapWindow GIS, uDig, or gvSIG. You can use it without paying for a license, although you might not get the same quality and performance as Global Mapper, and some data sources, formats, tools, or features might be limited. A short sketch of this approach follows this list.
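
      For readers who choose the open source route, here is a minimal sketch of a typical everyday GIS task done with free tools. It is only an illustration, assuming Python with the geopandas package installed; the file names parcels.shp and parcels_3857.gpkg are hypothetical placeholders, not files mentioned in this article.

```python
# A minimal sketch of a common GIS task using free, open source tools.
# Assumes Python with geopandas installed; "parcels.shp" is a
# hypothetical input file used here only for illustration.
import geopandas as gpd

# Load a vector layer from a shapefile
parcels = gpd.read_file("parcels.shp")

# Inspect the coordinate reference system and the attribute table
print(parcels.crs)
print(parcels.head())

# Reproject to Web Mercator and export to a GeoPackage
parcels_3857 = parcels.to_crs(epsg=3857)
parcels_3857.to_file("parcels_3857.gpkg", driver="GPKG")
```

      The same load, reproject, and export workflow is what Global Mapper performs through its interface, so a quick test like this can tell you whether a free tool already covers your needs.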

      Conclusion


      Global Mapper is a great GIS application that can help you with various geospatial tasks, and Crack Para Global Mapper 14 Rar is a file that contains a cracked version of Global Mapper 14 that lets you use the program for free, without paying for it or activating it with a valid license. However, this is an illegal and risky way of using Global Mapper: you might face legal consequences, compatibility issues, errors, bugs, crashes, viruses, malware, spyware, data loss, or computer damage.

      Therefore, we recommend that you avoid Crack Para Global Mapper 14 Rar and use one of the alternatives instead: buy a license for Global Mapper from the official website or platform, use the official trial version, or use a free or open source GIS software with similar features and functions.

      We hope this article has helped you understand what Crack Para Global Mapper 14 Rar is, how it is downloaded and used, and whether you should use it. If you have any questions or comments about this article, feel free to share them.

      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Edius 7.2 Serial Keygen High Quality.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Edius 7.2 Serial Keygen High Quality.md deleted file mode 100644 index 3c1f41b652f1a5c5364df243c6e4c87ab94b8d1f..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Edius 7.2 Serial Keygen High Quality.md +++ /dev/null @@ -1,29 +0,0 @@ - -

      How to Activate EDIUS Pro 7.2 with Serial Keygen


      EDIUS Pro 7.2 is powerful video editing software that supports various formats, resolutions, and frame rates. It allows you to edit 4K, 3D, HD, SD, and any combination of them on the same timeline. It also offers real-time editing, advanced color grading, multicam editing, and more.

      -

      If you want to use EDIUS Pro 7.2 without any limitations, you need to activate it with a serial keygen, a program that generates valid serial numbers for a software product. You can use one to get a serial number for EDIUS Pro 7.2 and register the software online or offline.


      Here are the steps to activate EDIUS Pro 7.2 with a serial keygen:

      1. Download a serial keygen for EDIUS Pro 7.2 from a reliable source. You can find serial keygens on the web, but be careful of malware and viruses.
      2. Run the serial keygen and click on the Generate button. You will get a 22-digit serial number for EDIUS Pro 7.2.
      3. Double-click on the EDIUS icon on your desktop to launch the software. During the first launch, you will see a screen to enter your serial number.
      4. Enter the serial number that you got from the serial keygen and click on Next.
      5. Choose whether you want to register online or offline. Online registration requires an internet connection and an email address. Offline registration requires an ID file and a license file.
      6. If you choose online registration, follow the on-screen instructions to complete the process. You will receive an email confirmation with your activation code.
      7. If you choose offline registration, follow these steps:
          • Create an ID file by clicking on Create ID File and saving it to your computer.
          • Go to https://wwwapps.grassvalley.com/activation/ on another computer that has internet access and upload your ID file.
          • You will receive a license file by email. Download it and save it to your computer.
          • Go back to your EDIUS computer and click on Register License File. Browse to the license file and open it.

      Congratulations! You have successfully activated EDIUS Pro 7.2 with a serial keygen. You can now enjoy all the features of this amazing video editing software.


      EDIUS Pro 7.2 is designed to handle complex and diverse projects with ease. It supports a wide range of formats, such as XAVC, XAVC S, AVC-Ultra, AVCHD, MPEG-2, DVCPRO, and more. It also supports native editing of various frame rates, such as 24p, 25p, 30p, 50p, 60p, and more. You can mix and match different formats and frame rates on the same timeline without any conversion or rendering.


      EDIUS Pro 7.2 also offers advanced tools for color correction, audio editing, titling, effects, transitions, and filters. You can use the built-in tools or integrate with third-party plugins and applications. For example, you can use the EDIUS ID to export and import projects to and from DaVinci Resolve for professional color grading. You can also use the EDIUS OFX Bridge to access hundreds of OFX plugins from various developers.


      EDIUS Pro 7.2 is compatible with various hardware devices and platforms. You can use it with Windows 10, Windows 8, and Windows 7 operating systems. You can also use it with Blackmagic Design's UltraStudio 4K and DeckLink 4K Extreme capture and playback devices for 4K workflows. You can also use it with Grass Valley's STORM 3G Elite and STORM Mobile for SDI and HDMI input and output.

      \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Adobe Dreamweaver Cs6 Serial Number Generator.md b/spaces/inreVtussa/clothingai/Examples/Adobe Dreamweaver Cs6 Serial Number Generator.md deleted file mode 100644 index 480eefe00280674fbbbdf186ca9c49c4ef88b67b..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Adobe Dreamweaver Cs6 Serial Number Generator.md +++ /dev/null @@ -1,13 +0,0 @@ -

      Adobe Dreamweaver CS6 Serial Number Generator





      Adobe Dreamweaver CS6 Crack [Serial Number + Keygen] Download. Adobe Dreamweaver CS6 is a powerful application with many great features, and you can download the Adobe Dreamweaver CS6 crack for free. This page lists all available torrents for Adobe Dreamweaver CS6; you can also download it for free via a magnet link, an analogue of a torrent that does not require downloading a torrent file.

      diff --git a/spaces/inreVtussa/clothingai/Examples/DisplayFusion Pro 9.6 _VERIFIED_ Free Download Portable.md b/spaces/inreVtussa/clothingai/Examples/DisplayFusion Pro 9.6 _VERIFIED_ Free Download Portable.md deleted file mode 100644 index 0068805754b264dd4e1d75a25db729c983970535..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/DisplayFusion Pro 9.6 _VERIFIED_ Free Download Portable.md +++ /dev/null @@ -1,6 +0,0 @@ -

      DisplayFusion Pro 9.6 Free Download Portable





      May 5, 2021 - DisplayFusion Crack makes working with two monitors (or three monitors or more) smooth and painless. With DisplayFusion, you can add up to 16 independent screens to your Windows 10 desktop. DisplayFusion allows you to easily switch between screens and customize fonts, themes, and resolutions. DisplayFusion is the easiest and most powerful multi-display software on the market.

      diff --git a/spaces/jackli888/stable-diffusion-webui/modules/styles.py b/spaces/jackli888/stable-diffusion-webui/modules/styles.py deleted file mode 100644 index d635c0109a1afd8867ef29b2d66ad864e1658113..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/styles.py +++ /dev/null @@ -1,87 +0,0 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - -import csv -import os -import os.path -import typing -import collections.abc as abc -import tempfile -import shutil - -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - - -class PromptStyle(typing.NamedTuple): - name: str - prompt: str - negative_prompt: str - - -def merge_prompts(style_prompt: str, prompt: str) -> str: - if "{prompt}" in style_prompt: - res = style_prompt.replace("{prompt}", prompt) - else: - parts = filter(None, (prompt.strip(), style_prompt.strip())) - res = ", ".join(parts) - - return res - - -def apply_styles_to_prompt(prompt, styles): - for style in styles: - prompt = merge_prompts(style, prompt) - - return prompt - - -class StyleDatabase: - def __init__(self, path: str): - self.no_style = PromptStyle("None", "", "") - self.styles = {} - self.path = path - - self.reload() - - def reload(self): - self.styles.clear() - - if not os.path.exists(self.path): - return - - with open(self.path, "r", encoding="utf-8-sig", newline='') as file: - reader = csv.DictReader(file) - for row in reader: - # Support loading old CSV format with "name, text"-columns - prompt = row["prompt"] if "prompt" in row else row["text"] - negative_prompt = row.get("negative_prompt", "") - self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt) - - def get_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).prompt for x in styles] - - def get_negative_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).negative_prompt for x in styles] - - def apply_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles]) - - def apply_negative_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) - - def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") - with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: - # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, - # and collections.NamedTuple has explicit documentation for accessing _fields. 
Same goes for _asdict() - writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) - writer.writeheader() - writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) diff --git a/spaces/jesuspj/jp/Dockerfile b/spaces/jesuspj/jp/Dockerfile deleted file mode 100644 index 94ee76a4f45af463ab7f945633c9258172f9cc80..0000000000000000000000000000000000000000 --- a/spaces/jesuspj/jp/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM huggingface/autotrain-advanced:latest -CMD autotrain app --port 7860 diff --git a/spaces/jipenaflor/Youtube-Transcript-Summarizer/README.md b/spaces/jipenaflor/Youtube-Transcript-Summarizer/README.md deleted file mode 100644 index 9fd8e1dd3947eb0f02dd6f78dccd79998cb5eaad..0000000000000000000000000000000000000000 --- a/spaces/jipenaflor/Youtube-Transcript-Summarizer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Youtube Transcript Summarizer -emoji: 🌖 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 2.8.14 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/jlondonobo/whisper-pt-demo/hf_to_whisper.py b/spaces/jlondonobo/whisper-pt-demo/hf_to_whisper.py deleted file mode 100644 index 8811e726ff0c9963beefa16f8c56fe3318a549bd..0000000000000000000000000000000000000000 --- a/spaces/jlondonobo/whisper-pt-demo/hf_to_whisper.py +++ /dev/null @@ -1,70 +0,0 @@ -# Original script: bayartsogt-ya/whisper-multiple-hf-datasets -from copy import deepcopy -import torch -from transformers import WhisperForConditionalGeneration - - -WHISPER_MAPPING = { - "layers": "blocks", - "fc1": "mlp.0", - "fc2": "mlp.2", - "final_layer_norm": "mlp_ln", - "layers": "blocks", - ".self_attn.q_proj": ".attn.query", - ".self_attn.k_proj": ".attn.key", - ".self_attn.v_proj": ".attn.value", - ".self_attn_layer_norm": ".attn_ln", - ".self_attn.out_proj": ".attn.out", - ".encoder_attn.q_proj": ".cross_attn.query", - ".encoder_attn.k_proj": ".cross_attn.key", - ".encoder_attn.v_proj": ".cross_attn.value", - ".encoder_attn_layer_norm": ".cross_attn_ln", - ".encoder_attn.out_proj": ".cross_attn.out", - "decoder.layer_norm.": "decoder.ln.", - "encoder.layer_norm.": "encoder.ln_post.", - "embed_tokens": "token_embedding", - "encoder.embed_positions.weight": "encoder.positional_embedding", - "decoder.embed_positions.weight": "decoder.positional_embedding", - "layer_norm": "ln_post", -} - - -def rename_keys(s_dict): - keys = list(s_dict.keys()) - for key in keys: - new_key = key - for k, v in WHISPER_MAPPING.items(): - if k in key: - new_key = new_key.replace(k, v) - - print(f"{key} -> {new_key}") - - s_dict[new_key] = s_dict.pop(key) - return s_dict - - -def write_whisper_model_to_memory( - hf_model_name_or_path: str, - whisper_state_path: str -): - transformer_model = WhisperForConditionalGeneration.from_pretrained(hf_model_name_or_path) - config = transformer_model.config - - # first build dims - dims = { - 'n_mels': config.num_mel_bins, - 'n_vocab': config.vocab_size, - 'n_audio_ctx': config.max_source_positions, - 'n_audio_state': config.d_model, - 'n_audio_head': config.encoder_attention_heads, - 'n_audio_layer': config.encoder_layers, - 'n_text_ctx': config.max_target_positions, - 'n_text_state': config.d_model, - 'n_text_head': config.decoder_attention_heads, - 'n_text_layer': config.decoder_layers - } - - state_dict = 
deepcopy(transformer_model.model.state_dict()) - state_dict = rename_keys(state_dict) - - torch.save({"dims": dims, "model_state_dict": state_dict}, whisper_state_path) \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/_cbsonmodule.c b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/_cbsonmodule.c deleted file mode 100644 index ce5c36bf9367b786e485e0214e51acd221793a0d..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/bson/_cbsonmodule.c +++ /dev/null @@ -1,3264 +0,0 @@ -/* - * Copyright 2009-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * This file contains C implementations of some of the functions - * needed by the bson module. If possible, these implementations - * should be used to speed up BSON encoding and decoding. - */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#include "datetime.h" - -#include "buffer.h" -#include "time64.h" - -#define _CBSON_MODULE -#include "_cbsonmodule.h" - -/* New module state and initialization code. - * See the module-initialization-and-state - * section in the following doc: - * http://docs.python.org/release/3.1.3/howto/cporting.html - * which references the following pep: - * http://www.python.org/dev/peps/pep-3121/ - * */ -struct module_state { - PyObject* Binary; - PyObject* Code; - PyObject* ObjectId; - PyObject* DBRef; - PyObject* Regex; - PyObject* UUID; - PyObject* Timestamp; - PyObject* MinKey; - PyObject* MaxKey; - PyObject* UTC; - PyTypeObject* REType; - PyObject* BSONInt64; - PyObject* Decimal128; - PyObject* Mapping; - PyObject* DatetimeMS; - PyObject* _min_datetime_ms; - PyObject* _max_datetime_ms; - PyObject* _type_marker_str; - PyObject* _flags_str; - PyObject* _pattern_str; - PyObject* _encoder_map_str; - PyObject* _decoder_map_str; - PyObject* _fallback_encoder_str; - PyObject* _raw_str; - PyObject* _subtype_str; - PyObject* _binary_str; - PyObject* _scope_str; - PyObject* _inc_str; - PyObject* _time_str; - PyObject* _bid_str; - PyObject* _replace_str; - PyObject* _astimezone_str; - PyObject* _id_str; - PyObject* _dollar_ref_str; - PyObject* _dollar_id_str; - PyObject* _dollar_db_str; - PyObject* _tzinfo_str; - PyObject* _as_doc_str; - PyObject* _utcoffset_str; - PyObject* _from_uuid_str; - PyObject* _as_uuid_str; - PyObject* _from_bid_str; -}; - -#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) - -/* Maximum number of regex flags */ -#define FLAGS_SIZE 7 - -/* Default UUID representation type code. */ -#define PYTHON_LEGACY 3 - -/* Other UUID representations. */ -#define STANDARD 4 -#define JAVA_LEGACY 5 -#define CSHARP_LEGACY 6 -#define UNSPECIFIED 0 - -#define BSON_MAX_SIZE 2147483647 -/* The smallest possible BSON document, i.e. 
"{}" */ -#define BSON_MIN_SIZE 5 - -/* Datetime codec options */ -#define DATETIME 1 -#define DATETIME_CLAMP 2 -#define DATETIME_MS 3 -#define DATETIME_AUTO 4 - -/* Converts integer to its string representation in decimal notation. */ -extern int cbson_long_long_to_str(long long num, char* str, size_t size) { - // Buffer should fit 64-bit signed integer - if (size < 21) { - PyErr_Format( - PyExc_RuntimeError, - "Buffer too small to hold long long: %d < 21", size); - return -1; - } - int index = 0; - int sign = 1; - // Convert to unsigned to handle -LLONG_MIN overflow - unsigned long long absNum; - // Handle the case of 0 - if (num == 0) { - str[index++] = '0'; - str[index] = '\0'; - return 0; - } - // Handle negative numbers - if (num < 0) { - sign = -1; - absNum = 0ULL - (unsigned long long)num; - } else { - absNum = (unsigned long long)num; - } - // Convert the number to string - unsigned long long digit; - while (absNum > 0) { - digit = absNum % 10ULL; - str[index++] = (char)digit + '0'; // Convert digit to character - absNum /= 10; - } - // Add minus sign if negative - if (sign == -1) { - str[index++] = '-'; - } - str[index] = '\0'; // Null terminator - // Reverse the string - int start = 0; - int end = index - 1; - while (start < end) { - char temp = str[start]; - str[start++] = str[end]; - str[end--] = temp; - } - return 0; -} - -static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { - // Test extreme values - Py_ssize_t maxNum = PY_SSIZE_T_MAX; - Py_ssize_t minNum = PY_SSIZE_T_MIN; - Py_ssize_t num; - char str_1[BUF_SIZE]; - char str_2[BUF_SIZE]; - int res = LL2STR(str_1, (long long)minNum); - if (res == -1) { - return NULL; - } - INT2STRING(str_2, (long long)minNum); - if (strcmp(str_1, str_2) != 0) { - PyErr_Format( - PyExc_RuntimeError, - "LL2STR != INT2STRING: %s != %s", str_1, str_2); - return NULL; - } - LL2STR(str_1, (long long)maxNum); - INT2STRING(str_2, (long long)maxNum); - if (strcmp(str_1, str_2) != 0) { - PyErr_Format( - PyExc_RuntimeError, - "LL2STR != INT2STRING: %s != %s", str_1, str_2); - return NULL; - } - - // Test common values - for (num = 0; num < 10000; num++) { - char str_1[BUF_SIZE]; - char str_2[BUF_SIZE]; - LL2STR(str_1, (long long)num); - INT2STRING(str_2, (long long)num); - if (strcmp(str_1, str_2) != 0) { - PyErr_Format( - PyExc_RuntimeError, - "LL2STR != INT2STRING: %s != %s", str_1, str_2); - return NULL; - } - } - - return args; -} - -/* Get an error class from the bson.errors module. - * - * Returns a new ref */ -static PyObject* _error(char* name) { - PyObject* error; - PyObject* errors = PyImport_ImportModule("bson.errors"); - if (!errors) { - return NULL; - } - error = PyObject_GetAttrString(errors, name); - Py_DECREF(errors); - return error; -} - -/* Safely downcast from Py_ssize_t to int, setting an - * exception and returning -1 on error. 
*/ -static int -_downcast_and_check(Py_ssize_t size, uint8_t extra) { - if (size > BSON_MAX_SIZE || ((BSON_MAX_SIZE - extra) < size)) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "String length must be <= 2147483647"); - Py_DECREF(InvalidStringData); - } - return -1; - } - return (int)size + extra; -} - -static PyObject* elements_to_dict(PyObject* self, const char* string, - unsigned max, - const codec_options_t* options); - -static int _write_element_to_buffer(PyObject* self, buffer_t buffer, - int type_byte, PyObject* value, - unsigned char check_keys, - const codec_options_t* options, - unsigned char in_custom_call, - unsigned char in_fallback_call); - -/* Write a RawBSONDocument to the buffer. - * Returns the number of bytes written or 0 on failure. - */ -static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw); - -/* Date stuff */ -static PyObject* datetime_from_millis(long long millis) { - /* To encode a datetime instance like datetime(9999, 12, 31, 23, 59, 59, 999999) - * we follow these steps: - * 1. Calculate a timestamp in seconds: 253402300799 - * 2. Multiply that by 1000: 253402300799000 - * 3. Add in microseconds divided by 1000 253402300799999 - * - * (Note: BSON doesn't support microsecond accuracy, hence the rounding.) - * - * To decode we could do: - * 1. Get seconds: timestamp / 1000: 253402300799 - * 2. Get micros: (timestamp % 1000) * 1000: 999000 - * Resulting in datetime(9999, 12, 31, 23, 59, 59, 999000) -- the expected result - * - * Now what if the we encode (1, 1, 1, 1, 1, 1, 111111)? - * 1. and 2. gives: -62135593139000 - * 3. Gives us: -62135593138889 - * - * Now decode: - * 1. Gives us: -62135593138 - * 2. Gives us: -889000 - * Resulting in datetime(1, 1, 1, 1, 1, 2, 15888216) -- an invalid result - * - * If instead to decode we do: - * diff = ((millis % 1000) + 1000) % 1000: 111 - * seconds = (millis - diff) / 1000: -62135593139 - * micros = diff * 1000 111000 - * Resulting in datetime(1, 1, 1, 1, 1, 1, 111000) -- the expected result - */ - int diff = (int)(((millis % 1000) + 1000) % 1000); - int microseconds = diff * 1000; - Time64_T seconds = (millis - diff) / 1000; - struct TM timeinfo; - cbson_gmtime64_r(&seconds, &timeinfo); - - return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, - timeinfo.tm_mon + 1, - timeinfo.tm_mday, - timeinfo.tm_hour, - timeinfo.tm_min, - timeinfo.tm_sec, - microseconds); -} - -static long long millis_from_datetime(PyObject* datetime) { - struct TM timeinfo; - long long millis; - - timeinfo.tm_year = PyDateTime_GET_YEAR(datetime) - 1900; - timeinfo.tm_mon = PyDateTime_GET_MONTH(datetime) - 1; - timeinfo.tm_mday = PyDateTime_GET_DAY(datetime); - timeinfo.tm_hour = PyDateTime_DATE_GET_HOUR(datetime); - timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); - timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); - - millis = cbson_timegm64(&timeinfo) * 1000; - millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; - return millis; -} - -/* Extended-range datetime, returns a DatetimeMS object with millis */ -static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ - // Allocate a new DatetimeMS object. 
- struct module_state *state = GETSTATE(self); - - PyObject* dt; - PyObject* ll_millis; - - if (!(ll_millis = PyLong_FromLongLong(millis))){ - return NULL; - } - dt = PyObject_CallFunctionObjArgs(state->DatetimeMS, ll_millis, NULL); - Py_DECREF(ll_millis); - return dt; -} - -/* Extended-range datetime, takes a DatetimeMS object and extracts the long long value. */ -static int millis_from_datetime_ms(PyObject* dt, long long* out){ - PyObject* ll_millis; - long long millis; - - if (!(ll_millis = PyNumber_Long(dt))){ - return 0; - } - millis = PyLong_AsLongLong(ll_millis); - Py_DECREF(ll_millis); - if (millis == -1 && PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB datetimes can only handle up to 8-byte ints"); - return 0; - } - *out = millis; - return 1; -} - -/* Just make this compatible w/ the old API. */ -int buffer_write_bytes(buffer_t buffer, const char* data, int size) { - if (pymongo_buffer_write(buffer, data, size)) { - return 0; - } - return 1; -} - -int buffer_write_double(buffer_t buffer, double data) { - double data_le = BSON_DOUBLE_TO_LE(data); - return buffer_write_bytes(buffer, (const char*)&data_le, 8); -} - -int buffer_write_int32(buffer_t buffer, int32_t data) { - uint32_t data_le = BSON_UINT32_TO_LE(data); - return buffer_write_bytes(buffer, (const char*)&data_le, 4); -} - -int buffer_write_int64(buffer_t buffer, int64_t data) { - uint64_t data_le = BSON_UINT64_TO_LE(data); - return buffer_write_bytes(buffer, (const char*)&data_le, 8); -} - -void buffer_write_int32_at_position(buffer_t buffer, - int position, - int32_t data) { - uint32_t data_le = BSON_UINT32_TO_LE(data); - memcpy(pymongo_buffer_get_buffer(buffer) + position, &data_le, 4); -} - -static int write_unicode(buffer_t buffer, PyObject* py_string) { - int size; - const char* data; - PyObject* encoded = PyUnicode_AsUTF8String(py_string); - if (!encoded) { - return 0; - } - data = PyBytes_AS_STRING(encoded); - if (!data) - goto unicodefail; - - if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) - goto unicodefail; - - if (!buffer_write_int32(buffer, (int32_t)size)) - goto unicodefail; - - if (!buffer_write_bytes(buffer, data, size)) - goto unicodefail; - - Py_DECREF(encoded); - return 1; - -unicodefail: - Py_DECREF(encoded); - return 0; -} - -/* returns 0 on failure */ -static int write_string(buffer_t buffer, PyObject* py_string) { - int size; - const char* data; - if (PyUnicode_Check(py_string)){ - return write_unicode(buffer, py_string); - } - data = PyBytes_AsString(py_string); - if (!data) { - return 0; - } - - if ((size = _downcast_and_check(PyBytes_Size(py_string), 1)) == -1) - return 0; - - if (!buffer_write_int32(buffer, (int32_t)size)) { - return 0; - } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; -} - -/* - * Are we in the main interpreter or a sub-interpreter? - * Useful for deciding if we can use cached pure python - * types in mod_wsgi. - */ -static int -_in_main_interpreter(void) { - static PyInterpreterState* main_interpreter = NULL; - PyInterpreterState* interpreter; - - if (main_interpreter == NULL) { - interpreter = PyInterpreterState_Head(); - - while (PyInterpreterState_Next(interpreter)) - interpreter = PyInterpreterState_Next(interpreter); - - main_interpreter = interpreter; - } - - return (main_interpreter == PyThreadState_Get()->interp); -} - -/* - * Get a reference to a pure python type. 
If we are in the - * main interpreter return the cached object, otherwise import - * the object we need and return it instead. - */ -static PyObject* -_get_object(PyObject* object, char* module_name, char* object_name) { - if (_in_main_interpreter()) { - Py_XINCREF(object); - return object; - } else { - PyObject* imported = NULL; - PyObject* module = PyImport_ImportModule(module_name); - if (!module) - return NULL; - imported = PyObject_GetAttrString(module, object_name); - Py_DECREF(module); - return imported; - } -} - -/* Load a Python object to cache. - * - * Returns non-zero on failure. */ -static int _load_object(PyObject** object, char* module_name, char* object_name) { - PyObject* module; - - module = PyImport_ImportModule(module_name); - if (!module) { - return 1; - } - - *object = PyObject_GetAttrString(module, object_name); - Py_DECREF(module); - - return (*object) ? 0 : 2; -} - -/* Load all Python objects to cache. - * - * Returns non-zero on failure. */ -static int _load_python_objects(PyObject* module) { - PyObject* empty_string = NULL; - PyObject* re_compile = NULL; - PyObject* compiled = NULL; - struct module_state *state = GETSTATE(module); - - /* Cache commonly used attribute names to improve performance. */ - if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && - (state->_flags_str = PyUnicode_FromString("flags")) && - (state->_pattern_str = PyUnicode_FromString("pattern")) && - (state->_encoder_map_str = PyUnicode_FromString("_encoder_map")) && - (state->_decoder_map_str = PyUnicode_FromString("_decoder_map")) && - (state->_fallback_encoder_str = PyUnicode_FromString("_fallback_encoder")) && - (state->_raw_str = PyUnicode_FromString("raw")) && - (state->_subtype_str = PyUnicode_FromString("subtype")) && - (state->_binary_str = PyUnicode_FromString("binary")) && - (state->_scope_str = PyUnicode_FromString("scope")) && - (state->_inc_str = PyUnicode_FromString("inc")) && - (state->_time_str = PyUnicode_FromString("time")) && - (state->_bid_str = PyUnicode_FromString("bid")) && - (state->_replace_str = PyUnicode_FromString("replace")) && - (state->_astimezone_str = PyUnicode_FromString("astimezone")) && - (state->_id_str = PyUnicode_FromString("_id")) && - (state->_dollar_ref_str = PyUnicode_FromString("$ref")) && - (state->_dollar_id_str = PyUnicode_FromString("$id")) && - (state->_dollar_db_str = PyUnicode_FromString("$db")) && - (state->_tzinfo_str = PyUnicode_FromString("tzinfo")) && - (state->_as_doc_str = PyUnicode_FromString("as_doc")) && - (state->_utcoffset_str = PyUnicode_FromString("utcoffset")) && - (state->_from_uuid_str = PyUnicode_FromString("from_uuid")) && - (state->_as_uuid_str = PyUnicode_FromString("as_uuid")) && - (state->_from_bid_str = PyUnicode_FromString("from_bid")))) { - return 1; - } - - if (_load_object(&state->Binary, "bson.binary", "Binary") || - _load_object(&state->Code, "bson.code", "Code") || - _load_object(&state->ObjectId, "bson.objectid", "ObjectId") || - _load_object(&state->DBRef, "bson.dbref", "DBRef") || - _load_object(&state->Timestamp, "bson.timestamp", "Timestamp") || - _load_object(&state->MinKey, "bson.min_key", "MinKey") || - _load_object(&state->MaxKey, "bson.max_key", "MaxKey") || - _load_object(&state->UTC, "bson.tz_util", "utc") || - _load_object(&state->Regex, "bson.regex", "Regex") || - _load_object(&state->BSONInt64, "bson.int64", "Int64") || - _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || - _load_object(&state->UUID, "uuid", "UUID") || - _load_object(&state->Mapping, 
"collections.abc", "Mapping") || - _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || - _load_object(&state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms") || - _load_object(&state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms")) { - return 1; - } - /* Reload our REType hack too. */ - empty_string = PyBytes_FromString(""); - if (empty_string == NULL) { - state->REType = NULL; - return 1; - } - - if (_load_object(&re_compile, "re", "compile")) { - state->REType = NULL; - Py_DECREF(empty_string); - return 1; - } - - compiled = PyObject_CallFunction(re_compile, "O", empty_string); - Py_DECREF(re_compile); - if (compiled == NULL) { - state->REType = NULL; - Py_DECREF(empty_string); - return 1; - } - Py_INCREF(Py_TYPE(compiled)); - state->REType = Py_TYPE(compiled); - Py_DECREF(empty_string); - Py_DECREF(compiled); - return 0; -} - -/* - * Get the _type_marker from an Object. - * - * Return the type marker, 0 if there is no marker, or -1 on failure. - */ -static long _type_marker(PyObject* object, PyObject* _type_marker_str) { - PyObject* type_marker = NULL; - long type = 0; - - if (PyObject_HasAttr(object, _type_marker_str)) { - type_marker = PyObject_GetAttr(object, _type_marker_str); - if (type_marker == NULL) { - return -1; - } - } - - /* - * Python objects with broken __getattr__ implementations could return - * arbitrary types for a call to PyObject_GetAttrString. For example - * pymongo.database.Database returns a new Collection instance for - * __getattr__ calls with names that don't match an existing attribute - * or method. In some cases "value" could be a subtype of something - * we know how to serialize. Make a best effort to encode these types. - */ - if (type_marker && PyLong_CheckExact(type_marker)) { - type = PyLong_AsLong(type_marker); - Py_DECREF(type_marker); - } else { - Py_XDECREF(type_marker); - } - - return type; -} - -/* Fill out a type_registry_t* from a TypeRegistry object. - * - * Return 1 on success. options->document_class is a new reference. - * Return 0 on failure. - */ -int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry, PyObject* _encoder_map_str, PyObject* _decoder_map_str, PyObject* _fallback_encoder_str) { - registry->encoder_map = NULL; - registry->decoder_map = NULL; - registry->fallback_encoder = NULL; - registry->registry_obj = NULL; - - registry->encoder_map = PyObject_GetAttr(registry_obj, _encoder_map_str); - if (registry->encoder_map == NULL) { - goto fail; - } - registry->is_encoder_empty = (PyDict_Size(registry->encoder_map) == 0); - - registry->decoder_map = PyObject_GetAttr(registry_obj, _decoder_map_str); - if (registry->decoder_map == NULL) { - goto fail; - } - registry->is_decoder_empty = (PyDict_Size(registry->decoder_map) == 0); - - registry->fallback_encoder = PyObject_GetAttr(registry_obj, _fallback_encoder_str); - if (registry->fallback_encoder == NULL) { - goto fail; - } - registry->has_fallback_encoder = (registry->fallback_encoder != Py_None); - - registry->registry_obj = registry_obj; - Py_INCREF(registry->registry_obj); - return 1; - -fail: - Py_XDECREF(registry->encoder_map); - Py_XDECREF(registry->decoder_map); - Py_XDECREF(registry->fallback_encoder); - return 0; -} - -/* Fill out a codec_options_t* from a CodecOptions object. - * - * Return 1 on success. options->document_class is a new reference. - * Return 0 on failure. 
- */ -int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { - PyObject* type_registry_obj = NULL; - struct module_state *state = GETSTATE(self); - long type_marker; - - options->unicode_decode_error_handler = NULL; - - if (!PyArg_ParseTuple(options_obj, "ObbzOOb", - &options->document_class, - &options->tz_aware, - &options->uuid_rep, - &options->unicode_decode_error_handler, - &options->tzinfo, - &type_registry_obj, - &options->datetime_conversion)) { - return 0; - } - - type_marker = _type_marker(options->document_class, - state->_type_marker_str); - if (type_marker < 0) { - return 0; - } - - if (!cbson_convert_type_registry(type_registry_obj, - &options->type_registry, state->_encoder_map_str, state->_decoder_map_str, state->_fallback_encoder_str)) { - return 0; - } - - options->is_raw_bson = (101 == type_marker); - options->options_obj = options_obj; - - Py_INCREF(options->options_obj); - Py_INCREF(options->document_class); - Py_INCREF(options->tzinfo); - - return 1; -} - -void destroy_codec_options(codec_options_t* options) { - Py_CLEAR(options->document_class); - Py_CLEAR(options->tzinfo); - Py_CLEAR(options->options_obj); - Py_CLEAR(options->type_registry.registry_obj); - Py_CLEAR(options->type_registry.encoder_map); - Py_CLEAR(options->type_registry.decoder_map); - Py_CLEAR(options->type_registry.fallback_encoder); -} - -static int write_element_to_buffer(PyObject* self, buffer_t buffer, - int type_byte, PyObject* value, - unsigned char check_keys, - const codec_options_t* options, - unsigned char in_custom_call, - unsigned char in_fallback_call) { - int result = 0; - if(Py_EnterRecursiveCall(" while encoding an object to BSON ")) { - return 0; - } - result = _write_element_to_buffer(self, buffer, type_byte, - value, check_keys, options, - in_custom_call, in_fallback_call); - Py_LeaveRecursiveCall(); - return result; -} - -static void -_set_cannot_encode(PyObject* value) { - if (PyLong_Check(value)) { - if ((PyLong_AsLongLong(value) == -1) && PyErr_Occurred()) { - return PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - } - } - - PyObject* type = NULL; - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument == NULL) { - goto error; - } - - type = PyObject_Type(value); - if (type == NULL) { - goto error; - } - PyErr_Format(InvalidDocument, "cannot encode object: %R, of type: %R", - value, type); -error: - Py_XDECREF(type); - Py_XDECREF(InvalidDocument); -} - -/* - * Encode a builtin Python regular expression or our custom Regex class. - * - * Sets exception and returns 0 on failure. - */ -static int _write_regex_to_buffer( - buffer_t buffer, int type_byte, PyObject* value, PyObject* _flags_str, PyObject* _pattern_str) { - - PyObject* py_flags; - PyObject* py_pattern; - PyObject* encoded_pattern; - PyObject* decoded_pattern; - long int_flags; - char flags[FLAGS_SIZE]; - char check_utf8 = 0; - const char* pattern_data; - int pattern_length, flags_length; - - /* - * Both the builtin re type and our Regex class have attributes - * "flags" and "pattern". 
- */ - py_flags = PyObject_GetAttr(value, _flags_str); - if (!py_flags) { - return 0; - } - int_flags = PyLong_AsLong(py_flags); - Py_DECREF(py_flags); - if (int_flags == -1 && PyErr_Occurred()) { - return 0; - } - py_pattern = PyObject_GetAttr(value, _pattern_str); - if (!py_pattern) { - return 0; - } - - if (PyUnicode_Check(py_pattern)) { - encoded_pattern = PyUnicode_AsUTF8String(py_pattern); - Py_DECREF(py_pattern); - if (!encoded_pattern) { - return 0; - } - } else { - encoded_pattern = py_pattern; - check_utf8 = 1; - } - - if (!(pattern_data = PyBytes_AsString(encoded_pattern))) { - Py_DECREF(encoded_pattern); - return 0; - } - if ((pattern_length = _downcast_and_check(PyBytes_Size(encoded_pattern), 0)) == -1) { - Py_DECREF(encoded_pattern); - return 0; - } - - if (strlen(pattern_data) != (size_t) pattern_length){ - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyErr_SetString(InvalidDocument, - "regex patterns must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - } - Py_DECREF(encoded_pattern); - return 0; - } - - if (check_utf8) { - decoded_pattern = PyUnicode_DecodeUTF8(pattern_data, (Py_ssize_t) pattern_length, NULL); - if (decoded_pattern == NULL) { - PyErr_Clear(); - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "regex patterns must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded_pattern); - return 0; - } - Py_DECREF(decoded_pattern); - } - - if (!buffer_write_bytes(buffer, pattern_data, pattern_length + 1)) { - Py_DECREF(encoded_pattern); - return 0; - } - Py_DECREF(encoded_pattern); - - flags[0] = 0; - - if (int_flags & 2) { - STRCAT(flags, FLAGS_SIZE, "i"); - } - if (int_flags & 4) { - STRCAT(flags, FLAGS_SIZE, "l"); - } - if (int_flags & 8) { - STRCAT(flags, FLAGS_SIZE, "m"); - } - if (int_flags & 16) { - STRCAT(flags, FLAGS_SIZE, "s"); - } - if (int_flags & 32) { - STRCAT(flags, FLAGS_SIZE, "u"); - } - if (int_flags & 64) { - STRCAT(flags, FLAGS_SIZE, "x"); - } - flags_length = (int)strlen(flags) + 1; - if (!buffer_write_bytes(buffer, flags, flags_length)) { - return 0; - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0B; - return 1; -} - -/* Write a single value to the buffer (also write its type_byte, for which - * space has already been reserved. - * - * returns 0 on failure */ -static int _write_element_to_buffer(PyObject* self, buffer_t buffer, - int type_byte, PyObject* value, - unsigned char check_keys, - const codec_options_t* options, - unsigned char in_custom_call, - unsigned char in_fallback_call) { - struct module_state *state = GETSTATE(self); - PyObject* mapping_type; - PyObject* new_value = NULL; - int retval; - PyObject* uuid_type; - int is_list; - /* - * Don't use PyObject_IsInstance for our custom types. It causes - * problems with python sub interpreters. Our custom types should - * have a _type_marker attribute, which we can switch on instead. 
- */ - long type = _type_marker(value, state->_type_marker_str); - if (type < 0) { - return 0; - } - - switch (type) { - case 5: - { - /* Binary */ - PyObject* subtype_object; - char subtype; - const char* data; - int size; - - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttr(value, state->_subtype_str); - if (!subtype_object) { - return 0; - } - subtype = (char)PyLong_AsLong(subtype_object); - if (subtype == -1) { - Py_DECREF(subtype_object); - return 0; - } - size = _downcast_and_check(PyBytes_Size(value), 0); - if (size == -1) { - Py_DECREF(subtype_object); - return 0; - } - - Py_DECREF(subtype_object); - if (subtype == 2) { - int other_size = _downcast_and_check(PyBytes_Size(value), 4); - if (other_size == -1) - return 0; - if (!buffer_write_int32(buffer, other_size)) { - return 0; - } - if (!buffer_write_bytes(buffer, &subtype, 1)) { - return 0; - } - } - if (!buffer_write_int32(buffer, size)) { - return 0; - } - if (subtype != 2) { - if (!buffer_write_bytes(buffer, &subtype, 1)) { - return 0; - } - } - data = PyBytes_AsString(value); - if (!data) { - return 0; - } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; - } - case 7: - { - /* ObjectId */ - const char* data; - PyObject* pystring = PyObject_GetAttr(value, state->_binary_str); - if (!pystring) { - return 0; - } - data = PyBytes_AsString(pystring); - if (!data) { - Py_DECREF(pystring); - return 0; - } - if (!buffer_write_bytes(buffer, data, 12)) { - Py_DECREF(pystring); - return 0; - } - Py_DECREF(pystring); - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; - return 1; - } - case 11: - { - /* Regex */ - return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); - } - case 13: - { - /* Code */ - int start_position, - length_location, - length; - - PyObject* scope = PyObject_GetAttr(value, state->_scope_str); - if (!scope) { - return 0; - } - - if (scope == Py_None) { - Py_DECREF(scope); - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0D; - return write_string(buffer, value); - } - - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0F; - - start_position = pymongo_buffer_get_position(buffer); - /* save space for length */ - length_location = pymongo_buffer_save_space(buffer, 4); - if (length_location == -1) { - Py_DECREF(scope); - return 0; - } - - if (!write_string(buffer, value)) { - Py_DECREF(scope); - return 0; - } - - if (!write_dict(self, buffer, scope, 0, options, 0)) { - Py_DECREF(scope); - return 0; - } - Py_DECREF(scope); - - length = pymongo_buffer_get_position(buffer) - start_position; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)length); - return 1; - } - case 17: - { - /* Timestamp */ - PyObject* obj; - unsigned long i; - - obj = PyObject_GetAttr(value, state->_inc_str); - if (!obj) { - return 0; - } - i = PyLong_AsUnsignedLong(obj); - Py_DECREF(obj); - if (i == (unsigned long)-1 && PyErr_Occurred()) { - return 0; - } - if (!buffer_write_int32(buffer, (int32_t)i)) { - return 0; - } - - obj = PyObject_GetAttr(value, state->_time_str); - if (!obj) { - return 0; - } - i = PyLong_AsUnsignedLong(obj); - Py_DECREF(obj); - if (i == (unsigned long)-1 && PyErr_Occurred()) { - return 0; - } - if (!buffer_write_int32(buffer, (int32_t)i)) { - return 0; - } - - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x11; - return 1; - } - case 18: - { - /* Int64 */ - const long long ll = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow */ - 
PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - if (!buffer_write_int64(buffer, (int64_t)ll)) { - return 0; - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; - return 1; - } - case 19: - { - /* Decimal128 */ - const char* data; - PyObject* pystring = PyObject_GetAttr(value, state->_bid_str); - if (!pystring) { - return 0; - } - data = PyBytes_AsString(pystring); - if (!data) { - Py_DECREF(pystring); - return 0; - } - if (!buffer_write_bytes(buffer, data, 16)) { - Py_DECREF(pystring); - return 0; - } - Py_DECREF(pystring); - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x13; - return 1; - } - case 100: - { - /* DBRef */ - PyObject* as_doc = PyObject_CallMethodObjArgs(value, state->_as_doc_str, NULL); - if (!as_doc) { - return 0; - } - if (!write_dict(self, buffer, as_doc, 0, options, 0)) { - Py_DECREF(as_doc); - return 0; - } - Py_DECREF(as_doc); - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; - return 1; - } - case 101: - { - /* RawBSONDocument */ - if (!write_raw_doc(buffer, value, state->_raw_str)) { - return 0; - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; - return 1; - } - case 255: - { - /* MinKey */ - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0xFF; - return 1; - } - case 127: - { - /* MaxKey */ - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x7F; - return 1; - } - } - - /* No _type_marker attribute or not one of our types. */ - - if (PyBool_Check(value)) { - const char c = (value == Py_True) ? 0x01 : 0x00; - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x08; - return buffer_write_bytes(buffer, &c, 1); - } - else if (PyLong_Check(value)) { - const long long long_long_value = PyLong_AsLongLong(value); - if (long_long_value == -1 && PyErr_Occurred()) { - /* Ignore error and give the fallback_encoder a chance. 
*/ - PyErr_Clear(); - } else if (-2147483648LL <= long_long_value && long_long_value <= 2147483647LL) { - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_int32(buffer, (int32_t)long_long_value); - } else { - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_int64(buffer, (int64_t)long_long_value); - } - } else if (PyFloat_Check(value)) { - const double d = PyFloat_AsDouble(value); - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; - return buffer_write_double(buffer, d); - } else if (value == Py_None) { - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0A; - return 1; - } else if (PyDict_Check(value)) { - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; - return write_dict(self, buffer, value, check_keys, options, 0); - } else if ((is_list = PyList_Check(value)) || PyTuple_Check(value)) { - Py_ssize_t items, i; - int start_position, - length_location, - length; - char zero = 0; - - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x04; - start_position = pymongo_buffer_get_position(buffer); - - /* save space for length */ - length_location = pymongo_buffer_save_space(buffer, 4); - if (length_location == -1) { - return 0; - } - if (is_list) { - items = PyList_Size(value); - } else { - items = PyTuple_Size(value); - } - if (items > BSON_MAX_SIZE) { - PyObject* BSONError = _error("BSONError"); - if (BSONError) { - PyErr_SetString(BSONError, - "Too many items to serialize."); - Py_DECREF(BSONError); - } - return 0; - } - for(i = 0; i < items; i++) { - int list_type_byte = pymongo_buffer_save_space(buffer, 1); - char name[BUF_SIZE]; - PyObject* item_value; - - if (list_type_byte == -1) { - return 0; - } - int res = LL2STR(name, (long long)i); - if (res == -1) { - return 0; - } - if (!buffer_write_bytes(buffer, name, (int)strlen(name) + 1)) { - return 0; - } - if (is_list) { - item_value = PyList_GET_ITEM(value, i); - } else { - item_value = PyTuple_GET_ITEM(value, i); - } - if (!item_value) { - return 0; - } - if (!write_element_to_buffer(self, buffer, list_type_byte, - item_value, check_keys, options, - 0, 0)) { - return 0; - } - } - - /* write null byte and fill in length */ - if (!buffer_write_bytes(buffer, &zero, 1)) { - return 0; - } - length = pymongo_buffer_get_position(buffer) - start_position; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)length); - return 1; - /* Python3 special case. Store bytes as BSON binary subtype 0. 
*/ - } else if (PyBytes_Check(value)) { - char subtype = 0; - int size; - const char* data = PyBytes_AS_STRING(value); - if (!data) - return 0; - if ((size = _downcast_and_check(PyBytes_GET_SIZE(value), 0)) == -1) - return 0; - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; - if (!buffer_write_int32(buffer, (int32_t)size)) { - return 0; - } - if (!buffer_write_bytes(buffer, &subtype, 1)) { - return 0; - } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; - } else if (PyUnicode_Check(value)) { - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x02; - return write_unicode(buffer, value); - } else if (PyDateTime_Check(value)) { - long long millis; - PyObject* utcoffset = PyObject_CallMethodObjArgs(value, state->_utcoffset_str , NULL); - if (utcoffset == NULL) - return 0; - if (utcoffset != Py_None) { - PyObject* result = PyNumber_Subtract(value, utcoffset); - Py_DECREF(utcoffset); - if (!result) { - return 0; - } - millis = millis_from_datetime(result); - Py_DECREF(result); - } else { - millis = millis_from_datetime(value); - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; - return buffer_write_int64(buffer, (int64_t)millis); - } else if (PyObject_TypeCheck(value, (PyTypeObject *) state->DatetimeMS)) { - long long millis; - if (!millis_from_datetime_ms(value, &millis)) { - return 0; - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; - return buffer_write_int64(buffer, (int64_t)millis); - } else if (PyObject_TypeCheck(value, state->REType)) { - return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); - } - - /* - * Try Mapping and UUID last since we have to import - * them if we're in a sub-interpreter. - */ - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); - if (mapping_type && PyObject_IsInstance(value, mapping_type)) { - Py_DECREF(mapping_type); - /* PyObject_IsInstance returns -1 on error */ - if (PyErr_Occurred()) { - return 0; - } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; - return write_dict(self, buffer, value, check_keys, options, 0); - } - - uuid_type = _get_object(state->UUID, "uuid", "UUID"); - if (uuid_type && PyObject_IsInstance(value, uuid_type)) { - PyObject* binary_type = NULL; - PyObject* binary_value = NULL; - PyObject *uuid_rep_obj = NULL; - int result; - - Py_DECREF(uuid_type); - /* PyObject_IsInstance returns -1 on error */ - if (PyErr_Occurred()) { - return 0; - } - - binary_type = _get_object(state->Binary, "bson", "Binary"); - if (binary_type == NULL) { - return 0; - } - - if (!(uuid_rep_obj = PyLong_FromLong(options->uuid_rep))) { - return 0; - } - binary_value = PyObject_CallMethodObjArgs(binary_type, state->_from_uuid_str, value, uuid_rep_obj, NULL); - Py_DECREF(uuid_rep_obj); - - if (binary_value == NULL) { - Py_DECREF(binary_type); - return 0; - } - - result = _write_element_to_buffer(self, buffer, - type_byte, binary_value, - check_keys, options, - in_custom_call, - in_fallback_call); - Py_DECREF(binary_type); - Py_DECREF(binary_value); - return result; - } - Py_XDECREF(mapping_type); - Py_XDECREF(uuid_type); - - /* Try a custom encoder if one is provided and we have not already - * attempted to use a type encoder. 
*/ - if (!in_custom_call && !options->type_registry.is_encoder_empty) { - PyObject* value_type = NULL; - PyObject* converter = NULL; - value_type = PyObject_Type(value); - if (value_type == NULL) { - return 0; - } - converter = PyDict_GetItem(options->type_registry.encoder_map, value_type); - Py_XDECREF(value_type); - if (converter != NULL) { - /* Transform types that have a registered converter. - * A new reference is created upon transformation. */ - new_value = PyObject_CallFunctionObjArgs(converter, value, NULL); - if (new_value == NULL) { - return 0; - } - retval = write_element_to_buffer(self, buffer, type_byte, new_value, - check_keys, options, 1, 0); - Py_XDECREF(new_value); - return retval; - } - } - - /* Try the fallback encoder if one is provided and we have not already - * attempted to use the fallback encoder. */ - if (!in_fallback_call && options->type_registry.has_fallback_encoder) { - new_value = PyObject_CallFunctionObjArgs( - options->type_registry.fallback_encoder, value, NULL); - if (new_value == NULL) { - // propagate any exception raised by the callback - return 0; - } - retval = write_element_to_buffer(self, buffer, type_byte, new_value, - check_keys, options, 0, 1); - Py_XDECREF(new_value); - return retval; - } - - /* We can't determine value's type. Fail. */ - _set_cannot_encode(value); - return 0; -} - -static int check_key_name(const char* name, int name_length) { - - if (name_length > 0 && name[0] == '$') { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyObject* errmsg = PyUnicode_FromFormat( - "key '%s' must not start with '$'", name); - if (errmsg) { - PyErr_SetObject(InvalidDocument, errmsg); - Py_DECREF(errmsg); - } - Py_DECREF(InvalidDocument); - } - return 0; - } - if (strchr(name, '.')) { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyObject* errmsg = PyUnicode_FromFormat( - "key '%s' must not contain '.'", name); - if (errmsg) { - PyErr_SetObject(InvalidDocument, errmsg); - Py_DECREF(errmsg); - } - Py_DECREF(InvalidDocument); - } - return 0; - } - return 1; -} - -/* Write a (key, value) pair to the buffer. - * - * Returns 0 on failure */ -int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_length, - PyObject* value, unsigned char check_keys, - const codec_options_t* options, unsigned char allow_id) { - int type_byte; - - /* Don't write any _id elements unless we're explicitly told to - - * _id has to be written first so we do so, but don't bother - * deleting it from the dictionary being written. 
*/ - if (!allow_id && strcmp(name, "_id") == 0) { - return 1; - } - - type_byte = pymongo_buffer_save_space(buffer, 1); - if (type_byte == -1) { - return 0; - } - if (check_keys && !check_key_name(name, name_length)) { - return 0; - } - if (!buffer_write_bytes(buffer, name, name_length + 1)) { - return 0; - } - if (!write_element_to_buffer(self, buffer, type_byte, - value, check_keys, options, 0, 0)) { - return 0; - } - return 1; -} - -int decode_and_write_pair(PyObject* self, buffer_t buffer, - PyObject* key, PyObject* value, - unsigned char check_keys, - const codec_options_t* options, - unsigned char top_level) { - PyObject* encoded; - const char* data; - int size; - if (PyUnicode_Check(key)) { - encoded = PyUnicode_AsUTF8String(key); - if (!encoded) { - return 0; - } - if (!(data = PyBytes_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); - return 0; - } - if (strlen(data) != (size_t)(size - 1)) { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyErr_SetString(InvalidDocument, - "Key names must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - } - Py_DECREF(encoded); - return 0; - } - } else { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyObject* repr = PyObject_Repr(key); - if (repr) { - PyObject* errmsg = PyUnicode_FromString( - "documents must have only string keys, key was "); - if (errmsg) { - PyObject* error = PyUnicode_Concat(errmsg, repr); - if (error) { - PyErr_SetObject(InvalidDocument, error); - Py_DECREF(error); - } - Py_DECREF(errmsg); - Py_DECREF(repr); - } else { - Py_DECREF(repr); - } - } - Py_DECREF(InvalidDocument); - } - return 0; - } - - /* If top_level is True, don't allow writing _id here - it was already written. */ - if (!write_pair(self, buffer, data, - size - 1, value, check_keys, options, !top_level)) { - Py_DECREF(encoded); - return 0; - } - - Py_DECREF(encoded); - return 1; -} - - -/* Write a RawBSONDocument to the buffer. - * Returns the number of bytes written or 0 on failure. 
- */ -static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { - char* bytes; - Py_ssize_t len; - int len_int; - int bytes_written = 0; - PyObject* bytes_obj = NULL; - - bytes_obj = PyObject_GetAttr(raw, _raw_str); - if (!bytes_obj) { - goto fail; - } - - if (-1 == PyBytes_AsStringAndSize(bytes_obj, &bytes, &len)) { - goto fail; - } - len_int = _downcast_and_check(len, 0); - if (-1 == len_int) { - goto fail; - } - if (!buffer_write_bytes(buffer, bytes, len_int)) { - goto fail; - } - bytes_written = len_int; -fail: - Py_XDECREF(bytes_obj); - return bytes_written; -} - -/* returns the number of bytes written or 0 on failure */ -int write_dict(PyObject* self, buffer_t buffer, - PyObject* dict, unsigned char check_keys, - const codec_options_t* options, unsigned char top_level) { - PyObject* key; - PyObject* iter; - char zero = 0; - int length; - int length_location; - struct module_state *state = GETSTATE(self); - PyObject* mapping_type; - long type_marker; - int is_dict = PyDict_Check(dict); - - if (!is_dict) { - /* check for RawBSONDocument */ - type_marker = _type_marker(dict, state->_type_marker_str); - if (type_marker < 0) { - return 0; - } - - if (101 == type_marker) { - return write_raw_doc(buffer, dict, state->_raw_str); - } - - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); - - if (mapping_type) { - if (!PyObject_IsInstance(dict, mapping_type)) { - PyObject* repr; - Py_DECREF(mapping_type); - if ((repr = PyObject_Repr(dict))) { - PyObject* errmsg = PyUnicode_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyObject* error = PyUnicode_Concat(errmsg, repr); - if (error) { - PyErr_SetObject(PyExc_TypeError, error); - Py_DECREF(error); - } - Py_DECREF(errmsg); - Py_DECREF(repr); - } - else { - Py_DECREF(repr); - } - } else { - PyErr_SetString(PyExc_TypeError, - "encoder expected a mapping type"); - } - - return 0; - } - Py_DECREF(mapping_type); - /* PyObject_IsInstance returns -1 on error */ - if (PyErr_Occurred()) { - return 0; - } - } - } - - length_location = pymongo_buffer_save_space(buffer, 4); - if (length_location == -1) { - return 0; - } - - /* Write _id first if this is a top level doc. */ - if (top_level) { - /* - * If "dict" is a defaultdict we don't want to call - * PyObject_GetItem on it. That would **create** - * an _id where one didn't previously exist (PYTHON-871). - */ - if (is_dict) { - /* PyDict_GetItem returns a borrowed reference. */ - PyObject* _id = PyDict_GetItem(dict, state->_id_str); - if (_id) { - if (!write_pair(self, buffer, "_id", 3, - _id, check_keys, options, 1)) { - return 0; - } - } - } else if (PyMapping_HasKey(dict, state->_id_str)) { - PyObject* _id = PyObject_GetItem(dict, state->_id_str); - if (!_id) { - return 0; - } - if (!write_pair(self, buffer, "_id", 3, - _id, check_keys, options, 1)) { - Py_DECREF(_id); - return 0; - } - /* PyObject_GetItem returns a new reference. 
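- * Unlike PyDict_GetItem above, which borrows its result, this
- * reference must be dropped once the pair has been written.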
*/ - Py_DECREF(_id); - } - } - - if (is_dict) { - PyObject* value; - Py_ssize_t pos = 0; - while (PyDict_Next(dict, &pos, &key, &value)) { - if (!decode_and_write_pair(self, buffer, key, value, - check_keys, options, top_level)) { - return 0; - } - } - } else { - iter = PyObject_GetIter(dict); - if (iter == NULL) { - return 0; - } - while ((key = PyIter_Next(iter)) != NULL) { - PyObject* value = PyObject_GetItem(dict, key); - if (!value) { - PyErr_SetObject(PyExc_KeyError, key); - Py_DECREF(key); - Py_DECREF(iter); - return 0; - } - if (!decode_and_write_pair(self, buffer, key, value, - check_keys, options, top_level)) { - Py_DECREF(key); - Py_DECREF(value); - Py_DECREF(iter); - return 0; - } - Py_DECREF(key); - Py_DECREF(value); - } - Py_DECREF(iter); - if (PyErr_Occurred()) { - return 0; - } - } - - /* write null byte and fill in length */ - if (!buffer_write_bytes(buffer, &zero, 1)) { - return 0; - } - length = pymongo_buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)length); - return length; -} - -static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { - PyObject* dict; - PyObject* result; - unsigned char check_keys; - unsigned char top_level = 1; - PyObject* options_obj; - codec_options_t options; - buffer_t buffer; - PyObject* raw_bson_document_bytes_obj; - long type_marker; - struct module_state *state = GETSTATE(self); - - if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, - &options_obj, &top_level) && - convert_codec_options(self, options_obj, &options))) { - return NULL; - } - - /* check for RawBSONDocument */ - type_marker = _type_marker(dict, state->_type_marker_str); - if (type_marker < 0) { - destroy_codec_options(&options); - return NULL; - } else if (101 == type_marker) { - destroy_codec_options(&options); - raw_bson_document_bytes_obj = PyObject_GetAttr(dict, state->_raw_str); - if (NULL == raw_bson_document_bytes_obj) { - return NULL; - } - return raw_bson_document_bytes_obj; - } - - buffer = pymongo_buffer_new(); - if (!buffer) { - destroy_codec_options(&options); - return NULL; - } - - if (!write_dict(self, buffer, dict, check_keys, &options, top_level)) { - destroy_codec_options(&options); - pymongo_buffer_free(buffer); - return NULL; - } - - /* objectify buffer */ - result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), - (Py_ssize_t)pymongo_buffer_get_position(buffer)); - destroy_codec_options(&options); - pymongo_buffer_free(buffer); - return result; -} - -/* - * Hook for optional decoding BSON documents to DBRef. - */ -static PyObject *_dbref_hook(PyObject* self, PyObject* value) { - struct module_state *state = GETSTATE(self); - PyObject* dbref = NULL; - PyObject* dbref_type = NULL; - PyObject* ref = NULL; - PyObject* id = NULL; - PyObject* database = NULL; - PyObject* ret = NULL; - int db_present = 0; - - /* Decoding for DBRefs */ - if (PyMapping_HasKey(value, state->_dollar_ref_str) && PyMapping_HasKey(value, state->_dollar_id_str)) { /* DBRef */ - ref = PyObject_GetItem(value, state->_dollar_ref_str); - /* PyObject_GetItem returns NULL to indicate error. */ - if (!ref) { - goto invalid; - } - id = PyObject_GetItem(value, state->_dollar_id_str); - /* PyObject_GetItem returns NULL to indicate error. 
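- * Decoding sketch, informative only: a mapping containing both $ref
- * and $id (and optionally $db) has those keys removed and is rebuilt
- * as DBRef(ref, id, database, <remaining fields>); any other mapping
- * is returned unchanged.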
*/ - if (!id) { - goto invalid; - } - - if (PyMapping_HasKey(value, state->_dollar_db_str)) { - database = PyObject_GetItem(value, state->_dollar_db_str); - if (!database) { - goto invalid; - } - db_present = 1; - } else { - database = Py_None; - Py_INCREF(database); - } - - // check types - if (!(PyUnicode_Check(ref) && (database == Py_None || PyUnicode_Check(database)))) { - ret = value; - goto invalid; - } - - PyMapping_DelItem(value, state->_dollar_ref_str); - PyMapping_DelItem(value, state->_dollar_id_str); - if (db_present) { - PyMapping_DelItem(value, state->_dollar_db_str); - } - - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - dbref = PyObject_CallFunctionObjArgs(dbref_type, ref, id, database, value, NULL); - Py_DECREF(value); - ret = dbref; - } - } else { - ret = value; - } -invalid: - Py_XDECREF(dbref_type); - Py_XDECREF(ref); - Py_XDECREF(id); - Py_XDECREF(database); - return ret; -} - -static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, - unsigned* position, unsigned char type, - unsigned max, const codec_options_t* options, int raw_array) { - struct module_state *state = GETSTATE(self); - PyObject* value = NULL; - switch (type) { - case 1: - { - double d; - if (max < 8) { - goto invalid; - } - memcpy(&d, buffer + *position, 8); - value = PyFloat_FromDouble(BSON_DOUBLE_FROM_LE(d)); - *position += 8; - break; - } - case 2: - case 14: - { - uint32_t value_length; - if (max < 4) { - goto invalid; - } - memcpy(&value_length, buffer + *position, 4); - value_length = BSON_UINT32_FROM_LE(value_length); - /* Encoded string length + string */ - if (!value_length || max < value_length || max < 4 + value_length) { - goto invalid; - } - *position += 4; - /* Strings must end in \0 */ - if (buffer[*position + value_length - 1]) { - goto invalid; - } - value = PyUnicode_DecodeUTF8( - buffer + *position, value_length - 1, - options->unicode_decode_error_handler); - if (!value) { - goto invalid; - } - *position += value_length; - break; - } - case 3: - { - uint32_t size; - - if (max < 4) { - goto invalid; - } - memcpy(&size, buffer + *position, 4); - size = BSON_UINT32_FROM_LE(size); - if (size < BSON_MIN_SIZE || max < size) { - goto invalid; - } - /* Check for bad eoo */ - if (buffer[*position + size - 1]) { - goto invalid; - } - - if (options->is_raw_bson) { - value = PyObject_CallFunction( - options->document_class, "y#O", - buffer + *position, (Py_ssize_t)size, options->options_obj); - if (!value) { - goto invalid; - } - *position += size; - break; - } - - value = elements_to_dict(self, buffer + *position + 4, - size - 5, options); - if (!value) { - goto invalid; - } - - /* Hook for DBRefs */ - value = _dbref_hook(self, value); - if (!value) { - goto invalid; - } - - *position += size; - break; - } - case 4: - { - uint32_t size, end; - - if (max < 4) { - goto invalid; - } - memcpy(&size, buffer + *position, 4); - size = BSON_UINT32_FROM_LE(size); - if (size < BSON_MIN_SIZE || max < size) { - goto invalid; - } - - end = *position + size - 1; - /* Check for bad eoo */ - if (buffer[end]) { - goto invalid; - } - - if (raw_array != 0) { - // Treat it as a binary buffer. 
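- // (Informative: with raw_array set, the caller receives the array's
- // entire embedded BSON document as bytes, length prefix and trailing
- // NUL included, rather than a decoded Python list.)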
- value = PyBytes_FromStringAndSize(buffer + *position, size); - *position += size; - break; - } - - *position += 4; - - value = PyList_New(0); - if (!value) { - goto invalid; - } - while (*position < end) { - PyObject* to_append; - - unsigned char bson_type = (unsigned char)buffer[(*position)++]; - - size_t key_size = strlen(buffer + *position); - if (max < key_size) { - Py_DECREF(value); - goto invalid; - } - /* just skip the key, they're in order. */ - *position += (unsigned)key_size + 1; - if (Py_EnterRecursiveCall(" while decoding a list value")) { - Py_DECREF(value); - goto invalid; - } - to_append = get_value(self, name, buffer, position, bson_type, - max - (unsigned)key_size, options, raw_array); - Py_LeaveRecursiveCall(); - if (!to_append) { - Py_DECREF(value); - goto invalid; - } - if (PyList_Append(value, to_append) < 0) { - Py_DECREF(value); - Py_DECREF(to_append); - goto invalid; - } - Py_DECREF(to_append); - } - if (*position != end) { - goto invalid; - } - (*position)++; - break; - } - case 5: - { - PyObject* data; - PyObject* st; - PyObject* type_to_create; - uint32_t length, length2; - unsigned char subtype; - - if (max < 5) { - goto invalid; - } - memcpy(&length, buffer + *position, 4); - length = BSON_UINT32_FROM_LE(length); - if (max < length) { - goto invalid; - } - - subtype = (unsigned char)buffer[*position + 4]; - *position += 5; - if (subtype == 2) { - if (length < 4) { - goto invalid; - } - memcpy(&length2, buffer + *position, 4); - length2 = BSON_UINT32_FROM_LE(length2); - if (length2 != length - 4) { - goto invalid; - } - } - /* Python3 special case. Decode BSON binary subtype 0 to bytes. */ - if (subtype == 0) { - value = PyBytes_FromStringAndSize(buffer + *position, length); - *position += length; - break; - } - if (subtype == 2) { - data = PyBytes_FromStringAndSize(buffer + *position + 4, length - 4); - } else { - data = PyBytes_FromStringAndSize(buffer + *position, length); - } - if (!data) { - goto invalid; - } - /* Encode as UUID or Binary based on options->uuid_rep */ - if (subtype == 3 || subtype == 4) { - PyObject* binary_type = NULL; - PyObject* binary_value = NULL; - char uuid_rep = options->uuid_rep; - - /* UUID should always be 16 bytes */ - if (length != 16) { - goto uuiderror; - } - - binary_type = _get_object(state->Binary, "bson", "Binary"); - if (binary_type == NULL) { - goto uuiderror; - } - - binary_value = PyObject_CallFunction(binary_type, "(Oi)", data, subtype); - if (binary_value == NULL) { - goto uuiderror; - } - - if ((uuid_rep == UNSPECIFIED) || - (subtype == 4 && uuid_rep != STANDARD) || - (subtype == 3 && uuid_rep == STANDARD)) { - value = binary_value; - Py_INCREF(value); - } else { - PyObject *uuid_rep_obj = PyLong_FromLong(uuid_rep); - if (!uuid_rep_obj) { - goto uuiderror; - } - value = PyObject_CallMethodObjArgs(binary_value, state->_as_uuid_str, uuid_rep_obj, NULL); - Py_DECREF(uuid_rep_obj); - } - - uuiderror: - Py_XDECREF(binary_type); - Py_XDECREF(binary_value); - Py_DECREF(data); - if (!value) { - goto invalid; - } - *position += length; - break; - } - - st = PyLong_FromLong(subtype); - if (!st) { - Py_DECREF(data); - goto invalid; - } - if ((type_to_create = _get_object(state->Binary, "bson.binary", "Binary"))) { - value = PyObject_CallFunctionObjArgs(type_to_create, data, st, NULL); - Py_DECREF(type_to_create); - } - Py_DECREF(st); - Py_DECREF(data); - if (!value) { - goto invalid; - } - *position += length; - break; - } - case 6: - case 10: - { - value = Py_None; - Py_INCREF(value); - break; - } - case 7: - { - PyObject* 
objectid_type; - if (max < 12) { - goto invalid; - } - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - value = PyObject_CallFunction(objectid_type, "y#", - buffer + *position, (Py_ssize_t)12); - Py_DECREF(objectid_type); - } - *position += 12; - break; - } - case 8: - { - char boolean_raw = buffer[(*position)++]; - if (0 == boolean_raw) { - value = Py_False; - } else if (1 == boolean_raw) { - value = Py_True; - } else { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_Format(InvalidBSON, "invalid boolean value: %x", boolean_raw); - Py_DECREF(InvalidBSON); - } - return NULL; - } - Py_INCREF(value); - break; - } - case 9: - { - PyObject* utc_type; - PyObject* naive; - PyObject* replace; - PyObject* args; - PyObject* kwargs; - PyObject* astimezone; - int64_t millis; - if (max < 8) { - goto invalid; - } - memcpy(&millis, buffer + *position, 8); - millis = (int64_t)BSON_UINT64_FROM_LE(millis); - *position += 8; - - if (options->datetime_conversion == DATETIME_MS){ - value = datetime_ms_from_millis(self, millis); - break; - } - - int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; - int dt_auto = options->datetime_conversion == DATETIME_AUTO; - - - if (dt_clamp || dt_auto){ - PyObject *min_millis_fn = _get_object(state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms"); - PyObject *max_millis_fn = _get_object(state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms"); - PyObject *min_millis_fn_res; - PyObject *max_millis_fn_res; - int64_t min_millis; - int64_t max_millis; - - if (min_millis_fn == NULL || max_millis_fn == NULL) { - Py_XDECREF(min_millis_fn); - Py_XDECREF(max_millis_fn); - goto invalid; - } - - if (options->tz_aware){ - PyObject* tzinfo = options->tzinfo; - if (tzinfo == Py_None) { - // Default to UTC. - utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); - tzinfo = utc_type; - } - min_millis_fn_res = PyObject_CallFunctionObjArgs(min_millis_fn, tzinfo, NULL); - max_millis_fn_res = PyObject_CallFunctionObjArgs(max_millis_fn, tzinfo, NULL); - } else { - min_millis_fn_res = PyObject_CallObject(min_millis_fn, NULL); - max_millis_fn_res = PyObject_CallObject(max_millis_fn, NULL); - } - - Py_DECREF(min_millis_fn); - Py_DECREF(max_millis_fn); - - if (!min_millis_fn_res || !max_millis_fn_res){ - Py_XDECREF(min_millis_fn_res); - Py_XDECREF(max_millis_fn_res); - goto invalid; - } - - min_millis = PyLong_AsLongLong(min_millis_fn_res); - max_millis = PyLong_AsLongLong(max_millis_fn_res); - - if ((min_millis == -1 || max_millis == -1) && PyErr_Occurred()) - { - // min/max_millis check - goto invalid; - } - - if (dt_clamp) { - if (millis < min_millis) { - millis = min_millis; - } else if (millis > max_millis) { - millis = max_millis; - } - // Continues from here to return a datetime. - } else { // dt_auto - if (millis < min_millis || millis > max_millis){ - value = datetime_ms_from_millis(self, millis); - break; // Out-of-range so done. - } - } - } - - naive = datetime_from_millis(millis); - if (!options->tz_aware) { /* In the naive case, we're done here. 
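- * Otherwise the naive value is given tzinfo=utc via datetime.replace
- * below, then converted with astimezone() when a non-default
- * options->tzinfo was supplied.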
*/ - value = naive; - break; - } - - if (!naive) { - goto invalid; - } - replace = PyObject_GetAttr(naive, state->_replace_str); - Py_DECREF(naive); - if (!replace) { - goto invalid; - } - args = PyTuple_New(0); - if (!args) { - Py_DECREF(replace); - goto invalid; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(replace); - Py_DECREF(args); - goto invalid; - } - utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); - if (!utc_type || PyDict_SetItem(kwargs, state->_tzinfo_str, utc_type) == -1) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - Py_XDECREF(utc_type); - goto invalid; - } - Py_XDECREF(utc_type); - value = PyObject_Call(replace, args, kwargs); - if (!value) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - - /* convert to local time */ - if (options->tzinfo != Py_None) { - astimezone = PyObject_GetAttr(value, state->_astimezone_str); - Py_DECREF(value); - if (!astimezone) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - value = PyObject_CallFunctionObjArgs(astimezone, options->tzinfo, NULL); - Py_DECREF(astimezone); - } - - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - break; - } - case 11: - { - PyObject* regex_class; - PyObject* pattern; - int flags; - size_t flags_length, i; - size_t pattern_length = strlen(buffer + *position); - if (pattern_length > BSON_MAX_SIZE || max < pattern_length) { - goto invalid; - } - pattern = PyUnicode_DecodeUTF8( - buffer + *position, pattern_length, - options->unicode_decode_error_handler); - if (!pattern) { - goto invalid; - } - *position += (unsigned)pattern_length + 1; - flags_length = strlen(buffer + *position); - if (flags_length > BSON_MAX_SIZE || - (BSON_MAX_SIZE - pattern_length) < flags_length) { - Py_DECREF(pattern); - goto invalid; - } - if (max < pattern_length + flags_length) { - Py_DECREF(pattern); - goto invalid; - } - flags = 0; - for (i = 0; i < flags_length; i++) { - if (buffer[*position + i] == 'i') { - flags |= 2; - } else if (buffer[*position + i] == 'l') { - flags |= 4; - } else if (buffer[*position + i] == 'm') { - flags |= 8; - } else if (buffer[*position + i] == 's') { - flags |= 16; - } else if (buffer[*position + i] == 'u') { - flags |= 32; - } else if (buffer[*position + i] == 'x') { - flags |= 64; - } - } - *position += (unsigned)flags_length + 1; - - regex_class = _get_object(state->Regex, "bson.regex", "Regex"); - if (regex_class) { - value = PyObject_CallFunction(regex_class, - "Oi", pattern, flags); - Py_DECREF(regex_class); - } - Py_DECREF(pattern); - break; - } - case 12: - { - uint32_t coll_length; - PyObject* collection; - PyObject* id = NULL; - PyObject* objectid_type; - PyObject* dbref_type; - - if (max < 4) { - goto invalid; - } - memcpy(&coll_length, buffer + *position, 4); - coll_length = BSON_UINT32_FROM_LE(coll_length); - /* Encoded string length + string + 12 byte ObjectId */ - if (!coll_length || max < coll_length || max < 4 + coll_length + 12) { - goto invalid; - } - *position += 4; - /* Strings must end in \0 */ - if (buffer[*position + coll_length - 1]) { - goto invalid; - } - - collection = PyUnicode_DecodeUTF8( - buffer + *position, coll_length - 1, - options->unicode_decode_error_handler); - if (!collection) { - goto invalid; - } - *position += coll_length; - - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - id = PyObject_CallFunction(objectid_type, "y#", - buffer + *position, (Py_ssize_t)12); - Py_DECREF(objectid_type); - } - if (!id) { - 
Py_DECREF(collection); - goto invalid; - } - *position += 12; - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - value = PyObject_CallFunctionObjArgs(dbref_type, collection, id, NULL); - Py_DECREF(dbref_type); - } - Py_DECREF(collection); - Py_DECREF(id); - break; - } - case 13: - { - PyObject* code; - PyObject* code_type; - uint32_t value_length; - if (max < 4) { - goto invalid; - } - memcpy(&value_length, buffer + *position, 4); - value_length = BSON_UINT32_FROM_LE(value_length); - /* Encoded string length + string */ - if (!value_length || max < value_length || max < 4 + value_length) { - goto invalid; - } - *position += 4; - /* Strings must end in \0 */ - if (buffer[*position + value_length - 1]) { - goto invalid; - } - code = PyUnicode_DecodeUTF8( - buffer + *position, value_length - 1, - options->unicode_decode_error_handler); - if (!code) { - goto invalid; - } - *position += value_length; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, NULL, NULL); - Py_DECREF(code_type); - } - Py_DECREF(code); - break; - } - case 15: - { - uint32_t c_w_s_size; - uint32_t code_size; - uint32_t scope_size; - PyObject* code; - PyObject* scope; - PyObject* code_type; - - if (max < 8) { - goto invalid; - } - - memcpy(&c_w_s_size, buffer + *position, 4); - c_w_s_size = BSON_UINT32_FROM_LE(c_w_s_size); - *position += 4; - - if (max < c_w_s_size) { - goto invalid; - } - - memcpy(&code_size, buffer + *position, 4); - code_size = BSON_UINT32_FROM_LE(code_size); - /* code_w_scope length + code length + code + scope length */ - if (!code_size || max < code_size || max < 4 + 4 + code_size + 4) { - goto invalid; - } - *position += 4; - /* Strings must end in \0 */ - if (buffer[*position + code_size - 1]) { - goto invalid; - } - code = PyUnicode_DecodeUTF8( - buffer + *position, code_size - 1, - options->unicode_decode_error_handler); - if (!code) { - goto invalid; - } - *position += code_size; - - memcpy(&scope_size, buffer + *position, 4); - scope_size = BSON_UINT32_FROM_LE(scope_size); - if (scope_size < BSON_MIN_SIZE) { - Py_DECREF(code); - goto invalid; - } - /* code length + code + scope length + scope */ - if ((4 + code_size + 4 + scope_size) != c_w_s_size) { - Py_DECREF(code); - goto invalid; - } - - /* Check for bad eoo */ - if (buffer[*position + scope_size - 1]) { - goto invalid; - } - scope = elements_to_dict(self, buffer + *position + 4, - scope_size - 5, options); - if (!scope) { - Py_DECREF(code); - goto invalid; - } - *position += scope_size; - - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, scope, NULL); - Py_DECREF(code_type); - } - Py_DECREF(code); - Py_DECREF(scope); - break; - } - case 16: - { - int32_t i; - if (max < 4) { - goto invalid; - } - memcpy(&i, buffer + *position, 4); - i = (int32_t)BSON_UINT32_FROM_LE(i); - value = PyLong_FromLong(i); - if (!value) { - goto invalid; - } - *position += 4; - break; - } - case 17: - { - uint32_t time, inc; - PyObject* timestamp_type; - if (max < 8) { - goto invalid; - } - memcpy(&inc, buffer + *position, 4); - memcpy(&time, buffer + *position + 4, 4); - inc = BSON_UINT32_FROM_LE(inc); - time = BSON_UINT32_FROM_LE(time); - if ((timestamp_type = _get_object(state->Timestamp, "bson.timestamp", "Timestamp"))) { - value = PyObject_CallFunction(timestamp_type, "II", time, inc); - Py_DECREF(timestamp_type); - } - *position += 8; - break; - } - case 18: - { - int64_t ll; - PyObject* 
bson_int64_type = _get_object(state->BSONInt64, - "bson.int64", "Int64"); - if (!bson_int64_type) - goto invalid; - if (max < 8) { - Py_DECREF(bson_int64_type); - goto invalid; - } - memcpy(&ll, buffer + *position, 8); - ll = (int64_t)BSON_UINT64_FROM_LE(ll); - value = PyObject_CallFunction(bson_int64_type, "L", ll); - *position += 8; - Py_DECREF(bson_int64_type); - break; - } - case 19: - { - PyObject* dec128; - if (max < 16) { - goto invalid; - } - if ((dec128 = _get_object(state->Decimal128, - "bson.decimal128", - "Decimal128"))) { - PyObject *_bytes_obj = PyBytes_FromStringAndSize(buffer + *position, (Py_ssize_t)16); - if (!_bytes_obj) { - Py_DECREF(dec128); - goto invalid; - } - value = PyObject_CallMethodObjArgs(dec128, state->_from_bid_str, _bytes_obj, NULL); - Py_DECREF(dec128); - Py_DECREF(_bytes_obj); - } - *position += 16; - break; - } - case 255: - { - PyObject* minkey_type = _get_object(state->MinKey, "bson.min_key", "MinKey"); - if (!minkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(minkey_type, NULL); - Py_DECREF(minkey_type); - break; - } - case 127: - { - PyObject* maxkey_type = _get_object(state->MaxKey, "bson.max_key", "MaxKey"); - if (!maxkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(maxkey_type, NULL); - Py_DECREF(maxkey_type); - break; - } - default: - { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyObject* bobj = PyBytes_FromFormat("%c", type); - if (bobj) { - PyObject* repr = PyObject_Repr(bobj); - Py_DECREF(bobj); - /* - * See http://bugs.python.org/issue22023 for why we can't - * just use PyUnicode_FromFormat with %S or %R to do this - * work. - */ - if (repr) { - PyObject* left = PyUnicode_FromString( - "Detected unknown BSON type "); - if (left) { - PyObject* lmsg = PyUnicode_Concat(left, repr); - Py_DECREF(left); - if (lmsg) { - PyObject* errmsg = PyUnicode_FromFormat( - "%U for fieldname '%U'. Are you using the " - "latest driver version?", lmsg, name); - if (errmsg) { - PyErr_SetObject(InvalidBSON, errmsg); - Py_DECREF(errmsg); - } - Py_DECREF(lmsg); - } - } - Py_DECREF(repr); - } - } - Py_DECREF(InvalidBSON); - } - goto invalid; - } - } - - if (value) { - if (!options->type_registry.is_decoder_empty) { - PyObject* value_type = NULL; - PyObject* converter = NULL; - value_type = PyObject_Type(value); - if (value_type == NULL) { - goto invalid; - } - converter = PyDict_GetItem(options->type_registry.decoder_map, value_type); - if (converter != NULL) { - PyObject* new_value = PyObject_CallFunctionObjArgs(converter, value, NULL); - Py_DECREF(value_type); - Py_DECREF(value); - return new_value; - } else { - Py_DECREF(value_type); - return value; - } - } - return value; - } - - invalid: - - /* - * Wrap any non-InvalidBSON errors in InvalidBSON. - */ - if (PyErr_Occurred()) { - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; - - /* - * Calling _error clears the error state, so fetch it first. - */ - PyErr_Fetch(&etype, &evalue, &etrace); - - /* Dont reraise anything but PyExc_Exceptions as InvalidBSON. */ - if (PyErr_GivenExceptionMatches(etype, PyExc_Exception)) { - InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - if (!PyErr_GivenExceptionMatches(etype, InvalidBSON)) { - /* - * Raise InvalidBSON(str(e)). 
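- * Only exceptions deriving from Exception are re-wrapped, and the
- * original message is preserved through PyObject_Str(evalue).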
- */ - Py_DECREF(etype); - etype = InvalidBSON; - - if (evalue) { - PyObject *msg = PyObject_Str(evalue); - Py_DECREF(evalue); - evalue = msg; - } - PyErr_NormalizeException(&etype, &evalue, &etrace); - } else { - /* - * The current exception matches InvalidBSON, so we don't - * need this reference after all. - */ - Py_DECREF(InvalidBSON); - } - } - } - /* Steals references to args. */ - PyErr_Restore(etype, evalue, etrace); - } else { - PyObject *InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "invalid length or type code"); - Py_DECREF(InvalidBSON); - } - } - return NULL; -} - -/* - * Get the next 'name' and 'value' from a document in a string, whose position - * is provided. - * - * Returns the position of the next element in the document, or -1 on error. - */ -static int _element_to_dict(PyObject* self, const char* string, - unsigned position, unsigned max, - const codec_options_t* options, - int raw_array, - PyObject** name, PyObject** value) { - unsigned char type = (unsigned char)string[position++]; - size_t name_length = strlen(string + position); - if (name_length > BSON_MAX_SIZE || position + name_length >= max) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "field name too large"); - Py_DECREF(InvalidBSON); - } - return -1; - } - *name = PyUnicode_DecodeUTF8( - string + position, name_length, - options->unicode_decode_error_handler); - if (!*name) { - /* If NULL is returned then wrap the UnicodeDecodeError - in an InvalidBSON error */ - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; - - PyErr_Fetch(&etype, &evalue, &etrace); - if (PyErr_GivenExceptionMatches(etype, PyExc_Exception)) { - InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - Py_DECREF(etype); - etype = InvalidBSON; - - if (evalue) { - PyObject *msg = PyObject_Str(evalue); - Py_DECREF(evalue); - evalue = msg; - } - PyErr_NormalizeException(&etype, &evalue, &etrace); - } - } - PyErr_Restore(etype, evalue, etrace); - return -1; - } - position += (unsigned)name_length + 1; - *value = get_value(self, *name, string, &position, type, - max - position, options, raw_array); - if (!*value) { - Py_DECREF(*name); - return -1; - } - return position; -} - -static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { - /* TODO: Support buffer protocol */ - char* string; - PyObject* bson; - PyObject* options_obj; - codec_options_t options; - unsigned position; - unsigned max; - int new_position; - int raw_array = 0; - PyObject* name; - PyObject* value; - PyObject* result_tuple; - - if (!(PyArg_ParseTuple(args, "OIIOp", &bson, &position, &max, - &options_obj, &raw_array) && - convert_codec_options(self, options_obj, &options))) { - return NULL; - } - - if (!PyBytes_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a bytes object"); - return NULL; - } - string = PyBytes_AS_STRING(bson); - - new_position = _element_to_dict(self, string, position, max, &options, raw_array, &name, &value); - if (new_position < 0) { - return NULL; - } - - result_tuple = Py_BuildValue("NNi", name, value, new_position); - if (!result_tuple) { - Py_DECREF(name); - Py_DECREF(value); - return NULL; - } - - destroy_codec_options(&options); - return result_tuple; -} - -static PyObject* _elements_to_dict(PyObject* self, const char* string, - unsigned max, - const codec_options_t* options) { - unsigned position = 0; - PyObject* dict = PyObject_CallObject(options->document_class, NULL); - if 
(!dict) { - return NULL; - } - int raw_array = 0; - while (position < max) { - PyObject* name = NULL; - PyObject* value = NULL; - int new_position; - - new_position = _element_to_dict( - self, string, position, max, options, raw_array, &name, &value); - if (new_position < 0) { - Py_DECREF(dict); - return NULL; - } else { - position = (unsigned)new_position; - } - - PyObject_SetItem(dict, name, value); - Py_DECREF(name); - Py_DECREF(value); - } - return dict; -} - -static PyObject* elements_to_dict(PyObject* self, const char* string, - unsigned max, - const codec_options_t* options) { - PyObject* result; - if (Py_EnterRecursiveCall(" while decoding a BSON document")) - return NULL; - result = _elements_to_dict(self, string, max, options); - Py_LeaveRecursiveCall(); - return result; -} - -static int _get_buffer(PyObject *exporter, Py_buffer *view) { - if (PyObject_GetBuffer(exporter, view, PyBUF_SIMPLE) == -1) { - return 0; - } - if (!PyBuffer_IsContiguous(view, 'C')) { - PyErr_SetString(PyExc_ValueError, - "must be a contiguous buffer"); - goto fail; - } - if (!view->buf || view->len < 0) { - PyErr_SetString(PyExc_ValueError, "invalid buffer"); - goto fail; - } - if (view->itemsize != 1) { - PyErr_SetString(PyExc_ValueError, - "buffer data must be ascii or utf8"); - goto fail; - } - return 1; -fail: - PyBuffer_Release(view); - return 0; -} - -static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { - int32_t size; - Py_ssize_t total_size; - const char* string; - PyObject* bson; - codec_options_t options; - PyObject* result = NULL; - PyObject* options_obj; - Py_buffer view = {0}; - - if (! (PyArg_ParseTuple(args, "OO", &bson, &options_obj) && - convert_codec_options(self, options_obj, &options))) { - return result; - } - - if (!_get_buffer(bson, &view)) { - destroy_codec_options(&options); - return result; - } - - total_size = view.len; - - if (total_size < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, - "not enough data for a BSON document"); - Py_DECREF(InvalidBSON); - } - goto done;; - } - - string = (char*)view.buf; - memcpy(&size, string, 4); - size = (int32_t)BSON_UINT32_FROM_LE(size); - if (size < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "invalid message size"); - Py_DECREF(InvalidBSON); - } - goto done; - } - - if (total_size < size || total_size > BSON_MAX_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "objsize too large"); - Py_DECREF(InvalidBSON); - } - goto done; - } - - if (size != total_size || string[size - 1]) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "bad eoo"); - Py_DECREF(InvalidBSON); - } - goto done; - } - - /* No need to decode fields if using RawBSONDocument */ - if (options.is_raw_bson) { - result = PyObject_CallFunction( - options.document_class, "y#O", string, (Py_ssize_t)size, - options_obj); - } - else { - result = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); - } -done: - PyBuffer_Release(&view); - destroy_codec_options(&options); - return result; -} - -static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { - int32_t size; - Py_ssize_t total_size; - const char* string; - PyObject* bson; - PyObject* dict; - PyObject* result = NULL; - codec_options_t options; - PyObject* options_obj = NULL; - Py_buffer view = {0}; - - if (!(PyArg_ParseTuple(args, 
"OO", &bson, &options_obj) && - convert_codec_options(self, options_obj, &options))) { - return NULL; - } - - if (!_get_buffer(bson, &view)) { - destroy_codec_options(&options); - return NULL; - } - total_size = view.len; - string = (char*)view.buf; - - if (!(result = PyList_New(0))) { - goto fail; - } - - while (total_size > 0) { - if (total_size < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, - "not enough data for a BSON document"); - Py_DECREF(InvalidBSON); - } - Py_DECREF(result); - goto fail; - } - - memcpy(&size, string, 4); - size = (int32_t)BSON_UINT32_FROM_LE(size); - if (size < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "invalid message size"); - Py_DECREF(InvalidBSON); - } - Py_DECREF(result); - goto fail; - } - - if (total_size < size) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "objsize too large"); - Py_DECREF(InvalidBSON); - } - Py_DECREF(result); - goto fail; - } - - if (string[size - 1]) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "bad eoo"); - Py_DECREF(InvalidBSON); - } - Py_DECREF(result); - goto fail; - } - - /* No need to decode fields if using RawBSONDocument. */ - if (options.is_raw_bson) { - dict = PyObject_CallFunction( - options.document_class, "y#O", string, (Py_ssize_t)size, - options_obj); - } else { - dict = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); - } - if (!dict) { - Py_DECREF(result); - goto fail; - } - if (PyList_Append(result, dict) < 0) { - Py_DECREF(dict); - Py_DECREF(result); - goto fail; - } - Py_DECREF(dict); - string += size; - total_size -= size; - } - goto done; -fail: - result = NULL; -done: - PyBuffer_Release(&view); - destroy_codec_options(&options); - return result; -} - - -static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* args) { - uint32_t size; - uint32_t value_length; - uint32_t position = 0; - buffer_t buffer; - const char* string; - PyObject* arr; - PyObject* result = NULL; - Py_buffer view = {0}; - - if (!PyArg_ParseTuple(args, "O", &arr)) { - return NULL; - } - - if (!_get_buffer(arr, &view)) { - return NULL; - } - - buffer = pymongo_buffer_new(); - if (!buffer) { - PyBuffer_Release(&view); - return NULL; - } - - string = (char*)view.buf; - - if (view.len < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, - "not enough data for a BSON document"); - Py_DECREF(InvalidBSON); - } - goto done; - } - - memcpy(&size, string, 4); - size = BSON_UINT32_FROM_LE(size); - /* save space for length */ - if (pymongo_buffer_save_space(buffer, size) == -1) { - goto fail; - } - pymongo_buffer_update_position(buffer, 0); - - position += 4; - while (position < size - 1) { - // Verify the value is an object. - unsigned char type = (unsigned char)string[position]; - if (type != 3) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "array element was not an object"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - - // Just skip the keys. 
- position = position + strlen(string + position) + 1; - - if (position >= size || (size - position) < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "invalid array content"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - - memcpy(&value_length, string + position, 4); - value_length = BSON_UINT32_FROM_LE(value_length); - if (value_length < BSON_MIN_SIZE) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "invalid message size"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - - if (view.len < size) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "objsize too large"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - - if (string[size - 1]) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "bad eoo"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - - if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { - goto fail; - } - position += value_length; - } - - /* objectify buffer */ - result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), - (Py_ssize_t)pymongo_buffer_get_position(buffer)); - goto done; -fail: - result = NULL; -done: - PyBuffer_Release(&view); - pymongo_buffer_free(buffer); - return result; -} - - -static PyMethodDef _CBSONMethods[] = { - {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, - "convert a dictionary to a string containing its BSON representation."}, - {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, - "convert a BSON string to a SON object."}, - {"_decode_all", _cbson_decode_all, METH_VARARGS, - "convert binary data to a sequence of documents."}, - {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, - "Decode a single key, value pair."}, - {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, - {"_test_long_long_to_str", _test_long_long_to_str, METH_VARARGS, "Test conversion of extreme and common Py_ssize_t values to str."}, - {NULL, NULL, 0, NULL} -}; - -#define INITERROR return NULL -static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->Binary); - Py_VISIT(GETSTATE(m)->Code); - Py_VISIT(GETSTATE(m)->ObjectId); - Py_VISIT(GETSTATE(m)->DBRef); - Py_VISIT(GETSTATE(m)->Regex); - Py_VISIT(GETSTATE(m)->UUID); - Py_VISIT(GETSTATE(m)->Timestamp); - Py_VISIT(GETSTATE(m)->MinKey); - Py_VISIT(GETSTATE(m)->MaxKey); - Py_VISIT(GETSTATE(m)->UTC); - Py_VISIT(GETSTATE(m)->REType); - Py_VISIT(GETSTATE(m)->_type_marker_str); - Py_VISIT(GETSTATE(m)->_flags_str); - Py_VISIT(GETSTATE(m)->_pattern_str); - Py_VISIT(GETSTATE(m)->_encoder_map_str); - Py_VISIT(GETSTATE(m)->_decoder_map_str); - Py_VISIT(GETSTATE(m)->_fallback_encoder_str); - Py_VISIT(GETSTATE(m)->_raw_str); - Py_VISIT(GETSTATE(m)->_subtype_str); - Py_VISIT(GETSTATE(m)->_binary_str); - Py_VISIT(GETSTATE(m)->_scope_str); - Py_VISIT(GETSTATE(m)->_inc_str); - Py_VISIT(GETSTATE(m)->_time_str); - Py_VISIT(GETSTATE(m)->_bid_str); - Py_VISIT(GETSTATE(m)->_replace_str); - Py_VISIT(GETSTATE(m)->_astimezone_str); - Py_VISIT(GETSTATE(m)->_id_str); - Py_VISIT(GETSTATE(m)->_dollar_ref_str); - Py_VISIT(GETSTATE(m)->_dollar_id_str); - Py_VISIT(GETSTATE(m)->_dollar_db_str); - Py_VISIT(GETSTATE(m)->_tzinfo_str); - Py_VISIT(GETSTATE(m)->_as_doc_str); - Py_VISIT(GETSTATE(m)->_utcoffset_str); - Py_VISIT(GETSTATE(m)->_from_uuid_str); - 
Py_VISIT(GETSTATE(m)->_as_uuid_str); - Py_VISIT(GETSTATE(m)->_from_bid_str); - return 0; -} - -static int _cbson_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->Binary); - Py_CLEAR(GETSTATE(m)->Code); - Py_CLEAR(GETSTATE(m)->ObjectId); - Py_CLEAR(GETSTATE(m)->DBRef); - Py_CLEAR(GETSTATE(m)->Regex); - Py_CLEAR(GETSTATE(m)->UUID); - Py_CLEAR(GETSTATE(m)->Timestamp); - Py_CLEAR(GETSTATE(m)->MinKey); - Py_CLEAR(GETSTATE(m)->MaxKey); - Py_CLEAR(GETSTATE(m)->UTC); - Py_CLEAR(GETSTATE(m)->REType); - Py_CLEAR(GETSTATE(m)->_type_marker_str); - Py_CLEAR(GETSTATE(m)->_flags_str); - Py_CLEAR(GETSTATE(m)->_pattern_str); - Py_CLEAR(GETSTATE(m)->_encoder_map_str); - Py_CLEAR(GETSTATE(m)->_decoder_map_str); - Py_CLEAR(GETSTATE(m)->_fallback_encoder_str); - Py_CLEAR(GETSTATE(m)->_raw_str); - Py_CLEAR(GETSTATE(m)->_subtype_str); - Py_CLEAR(GETSTATE(m)->_binary_str); - Py_CLEAR(GETSTATE(m)->_scope_str); - Py_CLEAR(GETSTATE(m)->_inc_str); - Py_CLEAR(GETSTATE(m)->_time_str); - Py_CLEAR(GETSTATE(m)->_bid_str); - Py_CLEAR(GETSTATE(m)->_replace_str); - Py_CLEAR(GETSTATE(m)->_astimezone_str); - Py_CLEAR(GETSTATE(m)->_id_str); - Py_CLEAR(GETSTATE(m)->_dollar_ref_str); - Py_CLEAR(GETSTATE(m)->_dollar_id_str); - Py_CLEAR(GETSTATE(m)->_dollar_db_str); - Py_CLEAR(GETSTATE(m)->_tzinfo_str); - Py_CLEAR(GETSTATE(m)->_as_doc_str); - Py_CLEAR(GETSTATE(m)->_utcoffset_str); - Py_CLEAR(GETSTATE(m)->_from_uuid_str); - Py_CLEAR(GETSTATE(m)->_as_uuid_str); - Py_CLEAR(GETSTATE(m)->_from_bid_str); - return 0; -} - -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cbson", - NULL, - sizeof(struct module_state), - _CBSONMethods, - NULL, - _cbson_traverse, - _cbson_clear, - NULL -}; - -PyMODINIT_FUNC -PyInit__cbson(void) -{ - PyObject *m; - PyObject *c_api_object; - static void *_cbson_API[_cbson_API_POINTER_COUNT]; - - PyDateTime_IMPORT; - if (PyDateTimeAPI == NULL) { - INITERROR; - } - - /* Export C API */ - _cbson_API[_cbson_buffer_write_bytes_INDEX] = (void *) buffer_write_bytes; - _cbson_API[_cbson_write_dict_INDEX] = (void *) write_dict; - _cbson_API[_cbson_write_pair_INDEX] = (void *) write_pair; - _cbson_API[_cbson_decode_and_write_pair_INDEX] = (void *) decode_and_write_pair; - _cbson_API[_cbson_convert_codec_options_INDEX] = (void *) convert_codec_options; - _cbson_API[_cbson_destroy_codec_options_INDEX] = (void *) destroy_codec_options; - _cbson_API[_cbson_buffer_write_double_INDEX] = (void *) buffer_write_double; - _cbson_API[_cbson_buffer_write_int32_INDEX] = (void *) buffer_write_int32; - _cbson_API[_cbson_buffer_write_int64_INDEX] = (void *) buffer_write_int64; - _cbson_API[_cbson_buffer_write_int32_at_position_INDEX] = - (void *) buffer_write_int32_at_position; - _cbson_API[_cbson_downcast_and_check_INDEX] = (void *) _downcast_and_check; - - c_api_object = PyCapsule_New((void *) _cbson_API, "_cbson._C_API", NULL); - if (c_api_object == NULL) - INITERROR; - - m = PyModule_Create(&moduledef); - if (m == NULL) { - Py_DECREF(c_api_object); - INITERROR; - } - - /* Import several python objects */ - if (_load_python_objects(m)) { - Py_DECREF(c_api_object); - Py_DECREF(m); - INITERROR; - } - - if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { - Py_DECREF(c_api_object); - Py_DECREF(m); - INITERROR; - } - - return m; -} diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py deleted file mode 100644 index 
324ffd016515f0f96e6505e53ffc5c50b149be49..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py +++ /dev/null @@ -1,1037 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import floatToFixedToStr -from fontTools.misc.textTools import byteord, safeEval - -# from itertools import * -from . import DefaultTable -from . import grUtils -from array import array -from functools import reduce -import struct, re, sys - -Silf_hdr_format = """ - > - version: 16.16F -""" - -Silf_hdr_format_3 = """ - > - version: 16.16F - compilerVersion: L - numSilf: H - x - x -""" - -Silf_part1_format_v3 = """ - > - ruleVersion: 16.16F - passOffset: H - pseudosOffset: H -""" - -Silf_part1_format = """ - > - maxGlyphID: H - extraAscent: h - extraDescent: h - numPasses: B - iSubst: B - iPos: B - iJust: B - iBidi: B - flags: B - maxPreContext: B - maxPostContext: B - attrPseudo: B - attrBreakWeight: B - attrDirectionality: B - attrMirroring: B - attrSkipPasses: B - numJLevels: B -""" - -Silf_justify_format = """ - > - attrStretch: B - attrShrink: B - attrStep: B - attrWeight: B - runto: B - x - x - x -""" - -Silf_part2_format = """ - > - numLigComp: H - numUserDefn: B - maxCompPerLig: B - direction: B - attCollisions: B - x - x - x - numCritFeatures: B -""" - -Silf_pseudomap_format = """ - > - unicode: L - nPseudo: H -""" - -Silf_pseudomap_format_h = """ - > - unicode: H - nPseudo: H -""" - -Silf_classmap_format = """ - > - numClass: H - numLinear: H -""" - -Silf_lookupclass_format = """ - > - numIDs: H - searchRange: H - entrySelector: H - rangeShift: H -""" - -Silf_lookuppair_format = """ - > - glyphId: H - index: H -""" - -Silf_pass_format = """ - > - flags: B - maxRuleLoop: B - maxRuleContext: B - maxBackup: B - numRules: H - fsmOffset: H - pcCode: L - rcCode: L - aCode: L - oDebug: L - numRows: H - numTransitional: H - numSuccess: H - numColumns: H -""" - -aCode_info = ( - ("NOP", 0), - ("PUSH_BYTE", "b"), - ("PUSH_BYTE_U", "B"), - ("PUSH_SHORT", ">h"), - ("PUSH_SHORT_U", ">H"), - ("PUSH_LONG", ">L"), - ("ADD", 0), - ("SUB", 0), - ("MUL", 0), - ("DIV", 0), - ("MIN", 0), - ("MAX", 0), - ("NEG", 0), - ("TRUNC8", 0), - ("TRUNC16", 0), - ("COND", 0), - ("AND", 0), # x10 - ("OR", 0), - ("NOT", 0), - ("EQUAL", 0), - ("NOT_EQ", 0), - ("LESS", 0), - ("GTR", 0), - ("LESS_EQ", 0), - ("GTR_EQ", 0), - ("NEXT", 0), - ("NEXT_N", "b"), - ("COPY_NEXT", 0), - ("PUT_GLYPH_8BIT_OBS", "B"), - ("PUT_SUBS_8BIT_OBS", "bBB"), - ("PUT_COPY", "b"), - ("INSERT", 0), - ("DELETE", 0), # x20 - ("ASSOC", -1), - ("CNTXT_ITEM", "bB"), - ("ATTR_SET", "B"), - ("ATTR_ADD", "B"), - ("ATTR_SUB", "B"), - ("ATTR_SET_SLOT", "B"), - ("IATTR_SET_SLOT", "BB"), - ("PUSH_SLOT_ATTR", "Bb"), - ("PUSH_GLYPH_ATTR_OBS", "Bb"), - ("PUSH_GLYPH_METRIC", "Bbb"), - ("PUSH_FEAT", "Bb"), - ("PUSH_ATT_TO_GATTR_OBS", "Bb"), - ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"), - ("PUSH_ISLOT_ATTR", "Bbb"), - ("PUSH_IGLYPH_ATTR", "Bbb"), - ("POP_RET", 0), # x30 - ("RET_ZERO", 0), - ("RET_TRUE", 0), - ("IATTR_SET", "BB"), - ("IATTR_ADD", "BB"), - ("IATTR_SUB", "BB"), - ("PUSH_PROC_STATE", "B"), - ("PUSH_VERSION", 0), - ("PUT_SUBS", ">bHH"), - ("PUT_SUBS2", 0), - ("PUT_SUBS3", 0), - ("PUT_GLYPH", ">H"), - ("PUSH_GLYPH_ATTR", ">Hb"), - ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), - ("BITOR", 0), - ("BITAND", 0), - ("BITNOT", 0), # x40 - ("BITSET", ">HH"), - ("SET_FEAT", "Bb"), -) -aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)]) - - -def disassemble(aCode): - 
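-    """Disassemble Graphite action code into a list of mnemonic strings.
-
-    A minimal doctest-style sketch against the opcode table above
-    (hypothetical bytecode; round-trips through assemble() below):
-
-    >>> disassemble(b"\\x01\\x02\\x01\\x03\\x06")
-    ['PUSH_BYTE(2)', 'PUSH_BYTE(3)', 'ADD']
-    """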
codelen = len(aCode) - pc = 0 - res = [] - while pc < codelen: - opcode = byteord(aCode[pc : pc + 1]) - if opcode > len(aCode_info): - instr = aCode_info[0] - else: - instr = aCode_info[opcode] - pc += 1 - if instr[1] != 0 and pc >= codelen: - return res - if instr[1] == -1: - count = byteord(aCode[pc]) - fmt = "%dB" % count - pc += 1 - elif instr[1] == 0: - fmt = "" - else: - fmt = instr[1] - if fmt == "": - res.append(instr[0]) - continue - parms = struct.unpack_from(fmt, aCode[pc:]) - res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") - pc += struct.calcsize(fmt) - return res - - -instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?") - - -def assemble(instrs): - res = b"" - for inst in instrs: - m = instre.match(inst) - if not m or not m.group(1) in aCode_map: - continue - opcode, parmfmt = aCode_map[m.group(1)] - res += struct.pack("B", opcode) - if m.group(2): - if parmfmt == 0: - continue - parms = [int(x) for x in re.split(r",\s*", m.group(2))] - if parmfmt == -1: - l = len(parms) - res += struct.pack(("%dB" % (l + 1)), l, *parms) - else: - res += struct.pack(parmfmt, *parms) - return res - - -def writecode(tag, writer, instrs): - writer.begintag(tag) - writer.newline() - for l in disassemble(instrs): - writer.write(l) - writer.newline() - writer.endtag(tag) - writer.newline() - - -def readcode(content): - res = [] - for e in content_string(content).split("\n"): - e = e.strip() - if not len(e): - continue - res.append(e) - return assemble(res) - - -attrs_info = ( - "flags", - "extraAscent", - "extraDescent", - "maxGlyphID", - "numLigComp", - "numUserDefn", - "maxCompPerLig", - "direction", - "lbGID", -) -attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi") -attrs_contexts = ("maxPreContext", "maxPostContext") -attrs_attributes = ( - "attrPseudo", - "attrBreakWeight", - "attrDirectionality", - "attrMirroring", - "attrSkipPasses", - "attCollisions", -) -pass_attrs_info = ( - "flags", - "maxRuleLoop", - "maxRuleContext", - "maxBackup", - "minRulePreContext", - "maxRulePreContext", - "collisionThreshold", -) -pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns") - - -def writesimple(tag, self, writer, *attrkeys): - attrs = dict([(k, getattr(self, k)) for k in attrkeys]) - writer.simpletag(tag, **attrs) - writer.newline() - - -def getSimple(self, attrs, *attr_list): - for k in attr_list: - if k in attrs: - setattr(self, k, int(safeEval(attrs[k]))) - - -def content_string(contents): - res = "" - for element in contents: - if isinstance(element, tuple): - continue - res += element - return res.strip() - - -def wrapline(writer, dat, length=80): - currline = "" - for d in dat: - if len(currline) > length: - writer.write(currline[:-1]) - writer.newline() - currline = "" - currline += d + " " - if len(currline): - writer.write(currline[:-1]) - writer.newline() - - -class _Object: - pass - - -class table_S__i_l_f(DefaultTable.DefaultTable): - """Silf table support""" - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.silfs = [] - - def decompile(self, data, ttFont): - sstruct.unpack2(Silf_hdr_format, data, self) - self.version = float(floatToFixedToStr(self.version, precisionBits=16)) - if self.version >= 5.0: - (data, self.scheme) = grUtils.decompress(data) - sstruct.unpack2(Silf_hdr_format_3, data, self) - base = sstruct.calcsize(Silf_hdr_format_3) - elif self.version < 3.0: - self.numSilf = struct.unpack(">H", data[4:6]) - self.scheme = 0 - self.compilerVersion = 0 - base = 8 - else: - self.scheme = 0 - 
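-            # (3.0 <= version < 5.0: the v3 header layout applies but the
-            # table data is stored uncompressed, hence scheme 0.)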
sstruct.unpack2(Silf_hdr_format_3, data, self) - base = sstruct.calcsize(Silf_hdr_format_3) - - silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:]) - for offset in silfoffsets: - s = Silf() - self.silfs.append(s) - s.decompile(data[offset:], ttFont, self.version) - - def compile(self, ttFont): - self.numSilf = len(self.silfs) - if self.version < 3.0: - hdr = sstruct.pack(Silf_hdr_format, self) - hdr += struct.pack(">HH", self.numSilf, 0) - else: - hdr = sstruct.pack(Silf_hdr_format_3, self) - offset = len(hdr) + 4 * self.numSilf - data = b"" - for s in self.silfs: - hdr += struct.pack(">L", offset) - subdata = s.compile(ttFont, self.version) - offset += len(subdata) - data += subdata - if self.version >= 5.0: - return grUtils.compress(self.scheme, hdr + data) - return hdr + data - - def toXML(self, writer, ttFont): - writer.comment("Attributes starting with _ are informative only") - writer.newline() - writer.simpletag( - "version", - version=self.version, - compilerVersion=self.compilerVersion, - compressionScheme=self.scheme, - ) - writer.newline() - for s in self.silfs: - writer.begintag("silf") - writer.newline() - s.toXML(writer, ttFont, self.version) - writer.endtag("silf") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.scheme = int(safeEval(attrs["compressionScheme"])) - self.version = float(safeEval(attrs["version"])) - self.compilerVersion = int(safeEval(attrs["compilerVersion"])) - return - if name == "silf": - s = Silf() - self.silfs.append(s) - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - s.fromXML(tag, attrs, subcontent, ttFont, self.version) - - -class Silf(object): - """A particular Silf subtable""" - - def __init__(self): - self.passes = [] - self.scriptTags = [] - self.critFeatures = [] - self.jLevels = [] - self.pMap = {} - - def decompile(self, data, ttFont, version=2.0): - if version >= 3.0: - _, data = sstruct.unpack2(Silf_part1_format_v3, data, self) - self.ruleVersion = float( - floatToFixedToStr(self.ruleVersion, precisionBits=16) - ) - _, data = sstruct.unpack2(Silf_part1_format, data, self) - for jlevel in range(self.numJLevels): - j, data = sstruct.unpack2(Silf_justify_format, data, _Object()) - self.jLevels.append(j) - _, data = sstruct.unpack2(Silf_part2_format, data, self) - if self.numCritFeatures: - self.critFeatures = struct.unpack_from( - (">%dH" % self.numCritFeatures), data - ) - data = data[self.numCritFeatures * 2 + 1 :] - (numScriptTag,) = struct.unpack_from("B", data) - if numScriptTag: - self.scriptTags = [ - struct.unpack("4s", data[x : x + 4])[0].decode("ascii") - for x in range(1, 1 + 4 * numScriptTag, 4) - ] - data = data[1 + 4 * numScriptTag :] - (self.lbGID,) = struct.unpack(">H", data[:2]) - if self.numPasses: - self.oPasses = struct.unpack( - (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses] - ) - data = data[6 + 4 * self.numPasses :] - (numPseudo,) = struct.unpack(">H", data[:2]) - for i in range(numPseudo): - if version >= 3.0: - pseudo = sstruct.unpack( - Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object() - ) - else: - pseudo = sstruct.unpack( - Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object() - ) - self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo) - data = data[8 + 6 * numPseudo :] - currpos = ( - sstruct.calcsize(Silf_part1_format) - + sstruct.calcsize(Silf_justify_format) * self.numJLevels - + sstruct.calcsize(Silf_part2_format) - + 2 * 
self.numCritFeatures - + 1 - + 1 - + 4 * numScriptTag - + 6 - + 4 * self.numPasses - + 8 - + 6 * numPseudo - ) - if version >= 3.0: - currpos += sstruct.calcsize(Silf_part1_format_v3) - self.classes = Classes() - self.classes.decompile(data, ttFont, version) - for i in range(self.numPasses): - p = Pass() - self.passes.append(p) - p.decompile( - data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos], - ttFont, - version, - ) - - def compile(self, ttFont, version=2.0): - self.numPasses = len(self.passes) - self.numJLevels = len(self.jLevels) - self.numCritFeatures = len(self.critFeatures) - numPseudo = len(self.pMap) - data = b"" - if version >= 3.0: - hdroffset = sstruct.calcsize(Silf_part1_format_v3) - else: - hdroffset = 0 - data += sstruct.pack(Silf_part1_format, self) - for j in self.jLevels: - data += sstruct.pack(Silf_justify_format, j) - data += sstruct.pack(Silf_part2_format, self) - if self.numCritFeatures: - data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures) - data += struct.pack("BB", 0, len(self.scriptTags)) - if len(self.scriptTags): - tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags] - data += b"".join(tdata) - data += struct.pack(">H", self.lbGID) - self.passOffset = len(data) - - data1 = grUtils.bininfo(numPseudo, 6) - currpos = hdroffset + len(data) + 4 * (self.numPasses + 1) - self.pseudosOffset = currpos + len(data1) - for u, p in sorted(self.pMap.items()): - data1 += struct.pack( - (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p) - ) - data1 += self.classes.compile(ttFont, version) - currpos += len(data1) - data2 = b"" - datao = b"" - for i, p in enumerate(self.passes): - base = currpos + len(data2) - datao += struct.pack(">L", base) - data2 += p.compile(ttFont, base, version) - datao += struct.pack(">L", currpos + len(data2)) - - if version >= 3.0: - data3 = sstruct.pack(Silf_part1_format_v3, self) - else: - data3 = b"" - return data3 + data + datao + data1 + data2 - - def toXML(self, writer, ttFont, version=2.0): - if version >= 3.0: - writer.simpletag("version", ruleVersion=self.ruleVersion) - writer.newline() - writesimple("info", self, writer, *attrs_info) - writesimple("passindexes", self, writer, *attrs_passindexes) - writesimple("contexts", self, writer, *attrs_contexts) - writesimple("attributes", self, writer, *attrs_attributes) - if len(self.jLevels): - writer.begintag("justifications") - writer.newline() - jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format) - for i, j in enumerate(self.jLevels): - attrs = dict([(k, getattr(j, k)) for k in jnames]) - writer.simpletag("justify", **attrs) - writer.newline() - writer.endtag("justifications") - writer.newline() - if len(self.critFeatures): - writer.begintag("critFeatures") - writer.newline() - writer.write(" ".join(map(str, self.critFeatures))) - writer.newline() - writer.endtag("critFeatures") - writer.newline() - if len(self.scriptTags): - writer.begintag("scriptTags") - writer.newline() - writer.write(" ".join(self.scriptTags)) - writer.newline() - writer.endtag("scriptTags") - writer.newline() - if self.pMap: - writer.begintag("pseudoMap") - writer.newline() - for k, v in sorted(self.pMap.items()): - writer.simpletag("pseudo", unicode=hex(k), pseudo=v) - writer.newline() - writer.endtag("pseudoMap") - writer.newline() - self.classes.toXML(writer, ttFont, version) - if len(self.passes): - writer.begintag("passes") - writer.newline() - for i, p in enumerate(self.passes): - writer.begintag("pass", _index=i) - writer.newline() - 
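-                # informative: each pass serializes its own rules and tables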
p.toXML(writer, ttFont, version) - writer.endtag("pass") - writer.newline() - writer.endtag("passes") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "version": - self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0"))) - if name == "info": - getSimple(self, attrs, *attrs_info) - elif name == "passindexes": - getSimple(self, attrs, *attrs_passindexes) - elif name == "contexts": - getSimple(self, attrs, *attrs_contexts) - elif name == "attributes": - getSimple(self, attrs, *attrs_attributes) - elif name == "justifications": - for element in content: - if not isinstance(element, tuple): - continue - (tag, attrs, subcontent) = element - if tag == "justify": - j = _Object() - for k, v in attrs.items(): - setattr(j, k, int(v)) - self.jLevels.append(j) - elif name == "critFeatures": - self.critFeatures = [] - element = content_string(content) - self.critFeatures.extend(map(int, element.split())) - elif name == "scriptTags": - self.scriptTags = [] - element = content_string(content) - for n in element.split(): - self.scriptTags.append(n) - elif name == "pseudoMap": - self.pMap = {} - for element in content: - if not isinstance(element, tuple): - continue - (tag, attrs, subcontent) = element - if tag == "pseudo": - k = int(attrs["unicode"], 16) - v = attrs["pseudo"] - self.pMap[k] = v - elif name == "classes": - self.classes = Classes() - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - self.classes.fromXML(tag, attrs, subcontent, ttFont, version) - elif name == "passes": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "pass": - p = Pass() - for e in subcontent: - if not isinstance(e, tuple): - continue - p.fromXML(e[0], e[1], e[2], ttFont, version) - self.passes.append(p) - - -class Classes(object): - def __init__(self): - self.linear = [] - self.nonLinear = [] - - def decompile(self, data, ttFont, version=2.0): - sstruct.unpack2(Silf_classmap_format, data, self) - if version >= 4.0: - oClasses = struct.unpack( - (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass] - ) - else: - oClasses = struct.unpack( - (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass] - ) - for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]): - self.linear.append( - ttFont.getGlyphName(x) - for x in struct.unpack((">%dH" % ((e - s) / 2)), data[s:e]) - ) - for s, e in zip( - oClasses[self.numLinear : self.numClass], - oClasses[self.numLinear + 1 : self.numClass + 1], - ): - nonLinids = [ - struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4) - ] - nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids]) - self.nonLinear.append(nonLin) - - def compile(self, ttFont, version=2.0): - data = b"" - oClasses = [] - if version >= 4.0: - offset = 8 + 4 * (len(self.linear) + len(self.nonLinear)) - else: - offset = 6 + 2 * (len(self.linear) + len(self.nonLinear)) - for l in self.linear: - oClasses.append(len(data) + offset) - gs = [ttFont.getGlyphID(x) for x in l] - data += struct.pack((">%dH" % len(l)), *gs) - for l in self.nonLinear: - oClasses.append(len(data) + offset) - gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()] - data += grUtils.bininfo(len(gs)) - data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)]) - oClasses.append(len(data) + offset) - self.numClass = len(oClasses) - 1 - self.numLinear = len(self.linear) - return ( - 
sstruct.pack(Silf_classmap_format, self) - + struct.pack( - ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses - ) - + data - ) - - def toXML(self, writer, ttFont, version=2.0): - writer.begintag("classes") - writer.newline() - writer.begintag("linearClasses") - writer.newline() - for i, l in enumerate(self.linear): - writer.begintag("linear", _index=i) - writer.newline() - wrapline(writer, l) - writer.endtag("linear") - writer.newline() - writer.endtag("linearClasses") - writer.newline() - writer.begintag("nonLinearClasses") - writer.newline() - for i, l in enumerate(self.nonLinear): - writer.begintag("nonLinear", _index=i + self.numLinear) - writer.newline() - for inp, ind in l.items(): - writer.simpletag("map", glyph=inp, index=ind) - writer.newline() - writer.endtag("nonLinear") - writer.newline() - writer.endtag("nonLinearClasses") - writer.newline() - writer.endtag("classes") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "linearClasses": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "linear": - l = content_string(subcontent).split() - self.linear.append(l) - elif name == "nonLinearClasses": - for element in content: - if not isinstance(element, tuple): - continue - tag, attrs, subcontent = element - if tag == "nonLinear": - l = {} - for e in subcontent: - if not isinstance(e, tuple): - continue - tag, attrs, subsubcontent = e - if tag == "map": - l[attrs["glyph"]] = int(safeEval(attrs["index"])) - self.nonLinear.append(l) - - -class Pass(object): - def __init__(self): - self.colMap = {} - self.rules = [] - self.rulePreContexts = [] - self.ruleSortKeys = [] - self.ruleConstraints = [] - self.passConstraints = b"" - self.actions = [] - self.stateTrans = [] - self.startStates = [] - - def decompile(self, data, ttFont, version=2.0): - _, data = sstruct.unpack2(Silf_pass_format, data, self) - (numRange, _, _, _) = struct.unpack(">4H", data[:8]) - data = data[8:] - for i in range(numRange): - (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6]) - for g in range(first, last + 1): - self.colMap[ttFont.getGlyphName(g)] = col - data = data[6 * numRange :] - oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data) - data = data[2 + 2 * self.numSuccess :] - rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data) - self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])] - data = data[2 * oRuleMap[-1] :] - (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2]) - numStartStates = self.maxRulePreContext - self.minRulePreContext + 1 - self.startStates = struct.unpack( - (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2] - ) - data = data[2 + numStartStates * 2 :] - self.ruleSortKeys = struct.unpack( - (">%dH" % self.numRules), data[: 2 * self.numRules] - ) - data = data[2 * self.numRules :] - self.rulePreContexts = struct.unpack( - ("%dB" % self.numRules), data[: self.numRules] - ) - data = data[self.numRules :] - (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3]) - oConstraints = list( - struct.unpack( - (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2] - ) - ) - data = data[5 + self.numRules * 2 :] - oActions = list( - struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2]) - ) - data = data[2 * self.numRules + 2 :] - for i in range(self.numTransitional): - a = array( - "H", data[i * self.numColumns * 2 : (i + 1) * 
self.numColumns * 2] - ) - if sys.byteorder != "big": - a.byteswap() - self.stateTrans.append(a) - data = data[self.numTransitional * self.numColumns * 2 + 1 :] - self.passConstraints = data[:pConstraint] - data = data[pConstraint:] - for i in range(len(oConstraints) - 2, -1, -1): - if oConstraints[i] == 0: - oConstraints[i] = oConstraints[i + 1] - self.ruleConstraints = [ - (data[s:e] if (e - s > 1) else b"") - for (s, e) in zip(oConstraints, oConstraints[1:]) - ] - data = data[oConstraints[-1] :] - self.actions = [ - (data[s:e] if (e - s > 1) else "") for (s, e) in zip(oActions, oActions[1:]) - ] - data = data[oActions[-1] :] - # not using debug - - def compile(self, ttFont, base, version=2.0): - # build it all up backwards - oActions = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, []) - )[1] - oConstraints = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), - self.ruleConstraints + [b""], - (1, []), - )[1] - constraintCode = b"\000" + b"".join(self.ruleConstraints) - transes = [] - for t in self.stateTrans: - if sys.byteorder != "big": - t.byteswap() - transes.append(t.tobytes()) - if sys.byteorder != "big": - t.byteswap() - if not len(transes): - self.startStates = [0] - oRuleMap = reduce( - lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, []) - )[1] - passRanges = [] - gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()]) - for e in grUtils.entries(gidcolmap, sameval=True): - if e[1]: - passRanges.append((e[0], e[0] + e[1] - 1, e[2][0])) - self.numRules = len(self.actions) - self.fsmOffset = ( - sstruct.calcsize(Silf_pass_format) - + 8 - + len(passRanges) * 6 - + len(oRuleMap) * 2 - + 2 * oRuleMap[-1] - + 2 - + 2 * len(self.startStates) - + 3 * self.numRules - + 3 - + 4 * self.numRules - + 4 - ) - self.pcCode = ( - self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base - ) - self.rcCode = self.pcCode + len(self.passConstraints) - self.aCode = self.rcCode + len(constraintCode) - self.oDebug = 0 - # now generate output - data = sstruct.pack(Silf_pass_format, self) - data += grUtils.bininfo(len(passRanges), 6) - data += b"".join(struct.pack(">3H", *p) for p in passRanges) - data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap) - flatrules = reduce(lambda a, x: a + x, self.rules, []) - data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules) - data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext) - data += struct.pack((">%dH" % len(self.startStates)), *self.startStates) - data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys) - data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts) - data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints)) - data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints) - data += struct.pack((">%dH" % (self.numRules + 1)), *oActions) - return ( - data - + b"".join(transes) - + struct.pack("B", 0) - + self.passConstraints - + constraintCode - + b"".join(self.actions) - ) - - def toXML(self, writer, ttFont, version=2.0): - writesimple("info", self, writer, *pass_attrs_info) - writesimple("fsminfo", self, writer, *pass_attrs_fsm) - writer.begintag("colmap") - writer.newline() - wrapline( - writer, - [ - "{}={}".format(*x) - for x in sorted( - self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0]) - ) - ], - ) - writer.endtag("colmap") - writer.newline() - writer.begintag("staterulemap") - writer.newline() - for i, r in enumerate(self.rules): - writer.simpletag( - "state", - 
number=self.numRows - self.numSuccess + i, - rules=" ".join(map(str, r)), - ) - writer.newline() - writer.endtag("staterulemap") - writer.newline() - writer.begintag("rules") - writer.newline() - for i in range(len(self.actions)): - writer.begintag( - "rule", - index=i, - precontext=self.rulePreContexts[i], - sortkey=self.ruleSortKeys[i], - ) - writer.newline() - if len(self.ruleConstraints[i]): - writecode("constraint", writer, self.ruleConstraints[i]) - writecode("action", writer, self.actions[i]) - writer.endtag("rule") - writer.newline() - writer.endtag("rules") - writer.newline() - if len(self.passConstraints): - writecode("passConstraint", writer, self.passConstraints) - if len(self.stateTrans): - writer.begintag("fsm") - writer.newline() - writer.begintag("starts") - writer.write(" ".join(map(str, self.startStates))) - writer.endtag("starts") - writer.newline() - for i, s in enumerate(self.stateTrans): - writer.begintag("row", _i=i) - # no newlines here - writer.write(" ".join(map(str, s))) - writer.endtag("row") - writer.newline() - writer.endtag("fsm") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, version=2.0): - if name == "info": - getSimple(self, attrs, *pass_attrs_info) - elif name == "fsminfo": - getSimple(self, attrs, *pass_attrs_fsm) - elif name == "colmap": - e = content_string(content) - for w in e.split(): - x = w.split("=") - if len(x) != 2 or x[0] == "" or x[1] == "": - continue - self.colMap[x[0]] = int(x[1]) - elif name == "staterulemap": - for e in content: - if not isinstance(e, tuple): - continue - tag, a, c = e - if tag == "state": - self.rules.append([int(x) for x in a["rules"].split(" ")]) - elif name == "rules": - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag != "rule": - continue - self.rulePreContexts.append(int(a["precontext"])) - self.ruleSortKeys.append(int(a["sortkey"])) - con = b"" - act = b"" - for e in c: - if not isinstance(e, tuple): - continue - tag, a, subc = e - if tag == "constraint": - con = readcode(subc) - elif tag == "action": - act = readcode(subc) - self.actions.append(act) - self.ruleConstraints.append(con) - elif name == "passConstraint": - self.passConstraints = readcode(content) - elif name == "fsm": - for element in content: - if not isinstance(element, tuple): - continue - tag, a, c = element - if tag == "row": - s = array("H") - e = content_string(c) - s.extend(map(int, e.split())) - self.stateTrans.append(s) - elif tag == "starts": - s = [] - e = content_string(c) - s.extend(map(int, e.split())) - self.startStates = s diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/query/tree/retrieve_query.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/query/tree/retrieve_query.py deleted file mode 100644 index 7911f40cc7b1721e4b5e3d66c9a6b7b551dc7027..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/query/tree/retrieve_query.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Retrieve query.""" -import logging -from typing import List, Optional - -from gpt_index.data_structs.data_structs import IndexGraph, Node -from gpt_index.indices.query.base import BaseGPTIndexQuery -from gpt_index.indices.query.embedding_utils import SimilarityTracker -from gpt_index.indices.query.schema import QueryBundle -from gpt_index.indices.utils import get_sorted_node_list - - -class 
GPTTreeIndexRetQuery(BaseGPTIndexQuery[IndexGraph]): - """GPT Tree Index retrieve query. - - This class directly retrieves the answer from the root nodes. - - Unlike GPTTreeIndexLeafQuery, this class assumes the graph already stores - the answer (because it was constructed with a query_str), so it does not - attempt to parse information down the graph in order to synthesize an answer. - - .. code-block:: python - - response = index.query("", mode="retrieve") - - Args: - text_qa_template (Optional[QuestionAnswerPrompt]): Question-Answer Prompt - (see :ref:`Prompt-Templates`). - - """ - - def _get_nodes_for_response( - self, - query_bundle: QueryBundle, - similarity_tracker: Optional[SimilarityTracker] = None, - ) -> List[Node]: - """Get nodes for response.""" - logging.info(f"> Starting query: {query_bundle.query_str}") - node_list = get_sorted_node_list(self.index_struct.root_nodes) - text_qa_template = self.text_qa_template.partial_format( - query_str=query_bundle.query_str - ) - node_text = self._prompt_helper.get_text_from_nodes( - node_list, prompt=text_qa_template - ) - return [Node(text=node_text)] diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/prompts/chat_prompts.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/prompts/chat_prompts.py deleted file mode 100644 index 3ce552e916d9defb64be146fd094835e4c429be9..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/prompts/chat_prompts.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Prompts for ChatGPT.""" - -from langchain.prompts.chat import ( - AIMessagePromptTemplate, - ChatPromptTemplate, - HumanMessagePromptTemplate, -) - -from gpt_index.prompts.prompts import RefinePrompt, RefineTableContextPrompt - -# Refine Prompt -CHAT_REFINE_PROMPT_TMPL_MSGS = [ - HumanMessagePromptTemplate.from_template("{query_str}"), - AIMessagePromptTemplate.from_template("{existing_answer}"), - HumanMessagePromptTemplate.from_template( - "We have the opportunity to refine the above answer " - "(only if needed) with some more context below.\n" - "------------\n" - "{context_msg}\n" - "------------\n" - "Given the new context, refine the original answer to better " - "answer the question. " - "If the context isn't useful, output the original answer again.", - ), -] - - -CHAT_REFINE_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_REFINE_PROMPT_TMPL_MSGS) -CHAT_REFINE_PROMPT = RefinePrompt.from_langchain_prompt(CHAT_REFINE_PROMPT_LC) - - -# Table Context Refine Prompt -CHAT_REFINE_TABLE_CONTEXT_TMPL_MSGS = [ - HumanMessagePromptTemplate.from_template("{query_str}"), - AIMessagePromptTemplate.from_template("{existing_answer}"), - HumanMessagePromptTemplate.from_template( - "We have provided a table schema below. " - "---------------------\n" - "{schema}\n" - "---------------------\n" - "We have also provided some context information below. " - "{context_msg}\n" - "---------------------\n" - "Given the context information and the table schema, " - "refine the original answer to better " - "answer the question. " - "If the context isn't useful, return the original answer." 
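# Note: the adjacent string literals above rely on implicit string
# concatenation, so each block forms a single human message. The refine
# pattern replays the model's previous answer together with new context
# and asks it to improve the answer only if that context helps.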
- ), -] -CHAT_REFINE_TABLE_CONTEXT_PROMPT_LC = ChatPromptTemplate.from_messages( - CHAT_REFINE_TABLE_CONTEXT_TMPL_MSGS -) -CHAT_REFINE_TABLE_CONTEXT_PROMPT = RefineTableContextPrompt.from_langchain_prompt( - CHAT_REFINE_TABLE_CONTEXT_PROMPT_LC -) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/slack.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/slack.py deleted file mode 100644 index 7dac25e0f01021c64ba72bfae8d1526558c73a4d..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/slack.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Slack reader.""" -import logging -import os -import time -from typing import List, Optional - -from gpt_index.readers.base import BaseReader -from gpt_index.readers.schema.base import Document - - -class SlackReader(BaseReader): - """Slack reader. - - Reads conversations from channels. - - Args: - slack_token (Optional[str]): Slack token. If not provided, we - assume the environment variable `SLACK_BOT_TOKEN` is set. - - """ - - def __init__(self, slack_token: Optional[str] = None) -> None: - """Initialize with parameters.""" - try: - from slack_sdk import WebClient - except ImportError: - raise ImportError( - "`slack_sdk` package not found, please run `pip install slack_sdk`" - ) - if slack_token is None: - slack_token = os.environ["SLACK_BOT_TOKEN"] - if slack_token is None: - raise ValueError( - "Must specify `slack_token` or set environment " - "variable `SLACK_BOT_TOKEN`." - ) - self.client = WebClient(token=slack_token) - res = self.client.api_test() - if not res["ok"]: - raise ValueError(f"Error initializing Slack API: {res['error']}") - - def _read_message(self, channel_id: str, message_ts: str) -> str: - from slack_sdk.errors import SlackApiError - - """Read a message.""" - - messages_text = [] - next_cursor = None - while True: - try: - # https://slack.com/api/conversations.replies - # List all replies to a message, including the message itself. 
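# Pagination: keep requesting pages with the cursor from
# response_metadata["next_cursor"] until the API reports has_more is
# false; rate-limit errors are retried after the advertised delay.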
- result = self.client.conversations_replies( - channel=channel_id, ts=message_ts, cursor=next_cursor - ) - messages = result["messages"] - for message in messages: - messages_text.append(message["text"]) - - if not result["has_more"]: - break - - next_cursor = result["response_metadata"]["next_cursor"] - except SlackApiError as e: - if e.response["error"] == "ratelimited": - logging.error( - "Rate limit error reached, sleeping for: {} seconds".format( - e.response.headers["retry-after"] - ) - ) - time.sleep(int(e.response.headers["retry-after"])) - else: - logging.error("Error parsing conversation replies: {}".format(e)) - - return "\n\n".join(messages_text) - - def _read_channel(self, channel_id: str) -> str: - from slack_sdk.errors import SlackApiError - - """Read a channel.""" - - result_messages = [] - next_cursor = None - while True: - try: - # Call the conversations.history method using the WebClient - # conversations.history returns the first 100 messages by default - # These results are paginated, - # see: https://api.slack.com/methods/conversations.history$pagination - result = self.client.conversations_history( - channel=channel_id, cursor=next_cursor - ) - conversation_history = result["messages"] - # Print results - logging.info( - "{} messages found in {}".format(len(conversation_history), id) - ) - for message in conversation_history: - result_messages.append( - self._read_message(channel_id, message["ts"]) - ) - - if not result["has_more"]: - break - next_cursor = result["response_metadata"]["next_cursor"] - - except SlackApiError as e: - if e.response["error"] == "ratelimited": - logging.error( - "Rate limit error reached, sleeping for: {} seconds".format( - e.response.headers["retry-after"] - ) - ) - time.sleep(int(e.response.headers["retry-after"])) - else: - logging.error("Error parsing conversation replies: {}".format(e)) - - return "\n\n".join(result_messages) - - def load_data(self, channel_ids: List[str]) -> List[Document]: - """Load data from the input directory. - - Args: - channel_ids (List[str]): List of channel ids to read. - - Returns: - List[Document]: List of documents. - - """ - results = [] - for channel_id in channel_ids: - channel_content = self._read_channel(channel_id) - results.append( - Document(channel_content, extra_info={"channel": channel_id}) - ) - return results - - -if __name__ == "__main__": - reader = SlackReader() - logging.info(reader.load_data(channel_ids=["C04DC2VUY3F"])) diff --git a/spaces/jordonpeter01/SD-2.1-CPU/README.md b/spaces/jordonpeter01/SD-2.1-CPU/README.md deleted file mode 100644 index 305b17c26e6cf9097d8ed11927e463c17017fa49..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/SD-2.1-CPU/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: SD 2.1 CPU -emoji: 🐢 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: Manjushri/SD-2.1-CPU ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/k1ngtai/MMS/uroman/bin/uroman-quick.pl b/spaces/k1ngtai/MMS/uroman/bin/uroman-quick.pl deleted file mode 100644 index 3c2bb6a84e891d68e7ee996dd72d154e8820c05d..0000000000000000000000000000000000000000 --- a/spaces/k1ngtai/MMS/uroman/bin/uroman-quick.pl +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/perl -w - -# uroman Nov. 
12, 2015 - July 25, 2016
-# version v0.7
-# Author: Ulf Hermjakob
-
-# Usage: uroman-quick.pl {-l [tur|uig|ukr|yid]} < STDIN
-# currently only for Arabic script languages, incl. Uyghur
-
-$|=1;
-
-use FindBin;
-use Cwd "abs_path";
-use File::Basename qw(dirname);
-use File::Spec;
-
-my $bin_dir = abs_path(dirname($0));
-my $root_dir = File::Spec->catfile($bin_dir, File::Spec->updir());
-my $data_dir = File::Spec->catfile($root_dir, "data");
-my $lib_dir = File::Spec->catfile($root_dir, "lib");
-
-use lib "$FindBin::Bin/../lib";
-use NLP::Romanizer;
-use NLP::UTF8;
-$romanizer = NLP::Romanizer;
-%ht = ();
-$lang_code = "";
-
-while (@ARGV) {
-   $arg = shift @ARGV;
-   if ($arg =~ /^-+(l|lc|lang-code)$/) {
-      $lang_code = lc (shift @ARGV || "")
-   } else {
-      print STDERR "Ignoring unrecognized arg $arg\n";
-   }
-}
-
-$romanization_table_arabic_block_filename = File::Spec->catfile($data_dir, "romanization-table-arabic-block.txt");
-$romanization_table_filename = File::Spec->catfile($data_dir, "romanization-table.txt");
-
-$romanizer->load_romanization_table(*ht, $romanization_table_arabic_block_filename);
-$romanizer->load_romanization_table(*ht, $romanization_table_filename);
-
-$line_number = 0;
-while (<>) {
-   $line_number++;
-   my $line = $_;
-   print $romanizer->quick_romanize($line, $lang_code, *ht) . "\n";
-   if ($line_number =~ /0000$/) {
-      print STDERR $line_number;
-   } elsif ($line_number =~ /000$/) {
-      print STDERR ".";
-   }
-}
-print STDERR "\n";
-
-exit 0;
-
diff --git a/spaces/kangvcar/RealChar/client/web/src/reportWebVitals.js b/spaces/kangvcar/RealChar/client/web/src/reportWebVitals.js
deleted file mode 100644
index 5253d3ad9e6be6690549cb255f5952337b02401d..0000000000000000000000000000000000000000
--- a/spaces/kangvcar/RealChar/client/web/src/reportWebVitals.js
+++ /dev/null
@@ -1,13 +0,0 @@
-const reportWebVitals = onPerfEntry => {
-  if (onPerfEntry && onPerfEntry instanceof Function) {
-    import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
-      getCLS(onPerfEntry);
-      getFID(onPerfEntry);
-      getFCP(onPerfEntry);
-      getLCP(onPerfEntry);
-      getTTFB(onPerfEntry);
-    });
-  }
-};
-
-export default reportWebVitals;
diff --git a/spaces/karolmajek/YOLOR/darknet/README.md b/spaces/karolmajek/YOLOR/darknet/README.md
deleted file mode 100644
index d2fc579741572cb0eaa03ca74598eee6da50985f..0000000000000000000000000000000000000000
--- a/spaces/karolmajek/YOLOR/darknet/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-## Model Zoo
-
-| Model | Test Size | APval | AP50val | AP75val | APSval | APMval | APLval | batch1 throughput |
-| :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
-| **YOLOv4-CSP** | 640 | **49.1%** | **67.7%** | **53.8%** | **32.1%** | **54.4%** | **63.2%** | 76 *fps* |
-| **YOLOR-CSP** | 640 | **49.2%** | **67.6%** | **53.7%** | **32.9%** | **54.4%** | **63.0%** | - |
-| | | | | | | |
-| **YOLOv4-CSP-X** | 640 | **50.9%** | **69.3%** | **55.4%** | **35.3%** | **55.8%** | **64.8%** | 53 *fps* |
-| **YOLOR-CSP-X** | 640 | **51.1%** | **69.6%** | **55.7%** | **35.7%** | **56.0%** | **65.2%** | - |
-| | | | | | | |
-
-## Installation
-
-https://github.com/AlexeyAB/darknet
-
-Docker environment (recommended)
-
-```
-# get code
-git clone https://github.com/AlexeyAB/darknet
-
-# create the docker container; you can increase the shared memory size if you have more available
-nvidia-docker run --name yolor -it -v your_coco_path/:/coco/ -v your_code_path/:/yolor --shm-size=64g nvcr.io/nvidia/pytorch:21.02-py3
-
-# apt install required packages
-apt update
-apt install -y libopencv-dev
-
-# edit Makefile
-#GPU=1
-#CUDNN=1
-#CUDNN_HALF=1
-#OPENCV=1
-#AVX=1
-#OPENMP=1
-#LIBSO=1
-#ZED_CAMERA=0
-#ZED_CAMERA_v2_8=0
-#
-#USE_CPP=0
-#DEBUG=0
-#
-#ARCH= -gencode arch=compute_52,code=[sm_70,compute_70] \
-#      -gencode arch=compute_61,code=[sm_75,compute_75] \
-#      -gencode arch=compute_61,code=[sm_80,compute_80] \
-#      -gencode arch=compute_61,code=[sm_86,compute_86]
-#
-#...
-
-# build
-make -j8
-```
-
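After the build completes, a quick smoke test can confirm the binary works. This is an illustrative command, not from the original README: it assumes the stock `cfg/coco.data` and `cfg/yolov4-csp.cfg` shipped with darknet, a weights file you have downloaded to `weights/`, and the sample image bundled with the repo.

```
./darknet detector test cfg/coco.data cfg/yolov4-csp.cfg weights/yolov4-csp.weights data/dog.jpg
```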
      - -## Testing - -To reproduce inference speed, using: - -``` -CUDA_VISIBLE_DEVICES=0 ./darknet detector demo cfg/coco.data cfg/yolov4-csp.cfg weights/yolov4-csp.weights source/test.mp4 -dont_show -benchmark -``` diff --git a/spaces/kat33/llama.cpp/app.py b/spaces/kat33/llama.cpp/app.py deleted file mode 100644 index 989679678bcec0f5ecfbaf764942f719833106ae..0000000000000000000000000000000000000000 --- a/spaces/kat33/llama.cpp/app.py +++ /dev/null @@ -1,76 +0,0 @@ -import os # to check if file exists -import sys # to flush stdout -import markdown # to render answer - -import gradio as gr -#import transformers -#from transformers import pipeline -from llama_cpp import Llama -from huggingface_hub import hf_hub_download - -model_repo="TheBloke/Nous-Hermes-13B-GGML" -model_filename="nous-hermes-13b.ggmlv3.q4_K_S.bin" - -#model="TheBloke/Nous-Hermes-13B-GGML" -#model="https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_K_S.bin" - -def download_model(): - # See https://github.com/OpenAccess-AI-Collective/ggml-webui/blob/main/tabbed.py - file_path="/home/user/.cache/huggingface/hub/models--TheBloke--Nous-Hermes-13B-GGML/snapshots/f1a48f90a07550e1ba30e347b2be69d4fa5e393b/nous-hermes-13b.ggmlv3.q4_K_S.bin" - if os.path.exists(file_path): - return file_path - else: - print("Downloading model...") - sys.stdout.flush() - file = hf_hub_download( - repo_id=model_repo, filename=model_filename - ) - print("Downloaded " + file) - return file - -def question_answer(context, question, max_tokens): - mfile=download_model() - # structure the prompt to make it easier for the ai - question1="\"\"\"\n" + question + "\n\"\"\"\n" - text=context + "\n\nQuestion: " + question1 + "\nPlease use markdown formatting for answer. \nAnswer:\n" - llm = Llama(model_path=mfile) - output = llm(text, max_tokens=max_tokens, stop=["### Response"], echo=True) - print(output) - - # remove the context and leave only the answer - answer=output['choices'][0]['text'] - answer = answer.replace(text, "", 1) - - # render the markdown and return the html and question - html_answer = markdown.markdown(answer) - return question, html_answer - ''' - Output is of the form: - { - "id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", - "object": "text_completion", - "created": 1679561337, - "model": "./models/7B/ggml-model.bin", - "choices": [ - { - "text": "Q: Name the planets in the solar system? 
A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.", - "index": 0, - "logprobs": None, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 14, - "completion_tokens": 28, - "total_tokens": 42 - } - } - ''' - - # old transformers code - #generator = pipeline(model=model, device_map="auto") - #return generator(text) - - -app=gr.Interface(fn=question_answer, inputs=["text", "text",gr.Slider(33, 2333)], outputs=["textbox", "html"]) -app.launch() diff --git a/spaces/kcagle/AutoGPT/tests/__init__.py b/spaces/kcagle/AutoGPT/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/onnx_helper.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/onnx_helper.py deleted file mode 100644 index ca922ca6d410655029e459cf8fd1c323d276c34c..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/onnx_helper.py +++ /dev/null @@ -1,250 +0,0 @@ -from __future__ import division -import datetime -import os -import os.path as osp -import glob -import numpy as np -import cv2 -import sys -import onnxruntime -import onnx -import argparse -from onnx import numpy_helper -from insightface.data import get_image - -class ArcFaceORT: - def __init__(self, model_path, cpu=False): - self.model_path = model_path - # providers = None will use available provider, for onnxruntime-gpu it will be "CUDAExecutionProvider" - self.providers = ['CPUExecutionProvider'] if cpu else None - - #input_size is (w,h), return error message, return None if success - def check(self, track='cfat', test_img = None): - #default is cfat - max_model_size_mb=1024 - max_feat_dim=512 - max_time_cost=15 - if track.startswith('ms1m'): - max_model_size_mb=1024 - max_feat_dim=512 - max_time_cost=10 - elif track.startswith('glint'): - max_model_size_mb=1024 - max_feat_dim=1024 - max_time_cost=20 - elif track.startswith('cfat'): - max_model_size_mb = 1024 - max_feat_dim = 512 - max_time_cost = 15 - elif track.startswith('unconstrained'): - max_model_size_mb=1024 - max_feat_dim=1024 - max_time_cost=30 - else: - return "track not found" - - if not os.path.exists(self.model_path): - return "model_path not exists" - if not os.path.isdir(self.model_path): - return "model_path should be directory" - onnx_files = [] - for _file in os.listdir(self.model_path): - if _file.endswith('.onnx'): - onnx_files.append(osp.join(self.model_path, _file)) - if len(onnx_files)==0: - return "do not have onnx files" - self.model_file = sorted(onnx_files)[-1] - print('use onnx-model:', self.model_file) - try: - session = onnxruntime.InferenceSession(self.model_file, providers=self.providers) - except: - return "load onnx failed" - input_cfg = session.get_inputs()[0] - input_shape = input_cfg.shape - print('input-shape:', input_shape) - if len(input_shape)!=4: - return "length of input_shape should be 4" - if not isinstance(input_shape[0], str): - #return "input_shape[0] should be str to support batch-inference" - print('reset input-shape[0] to None') - model = onnx.load(self.model_file) - model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None' - new_model_file = osp.join(self.model_path, 'zzzzrefined.onnx') - onnx.save(model, new_model_file) - self.model_file = new_model_file - print('use new onnx-model:', self.model_file) - try: - session = 
onnxruntime.InferenceSession(self.model_file, providers=self.providers) - except: - return "load onnx failed" - input_cfg = session.get_inputs()[0] - input_shape = input_cfg.shape - print('new-input-shape:', input_shape) - - self.image_size = tuple(input_shape[2:4][::-1]) - #print('image_size:', self.image_size) - input_name = input_cfg.name - outputs = session.get_outputs() - output_names = [] - for o in outputs: - output_names.append(o.name) - #print(o.name, o.shape) - if len(output_names)!=1: - return "number of output nodes should be 1" - self.session = session - self.input_name = input_name - self.output_names = output_names - #print(self.output_names) - model = onnx.load(self.model_file) - graph = model.graph - if len(graph.node)<8: - return "too small onnx graph" - - input_size = (112,112) - self.crop = None - if track=='cfat': - crop_file = osp.join(self.model_path, 'crop.txt') - if osp.exists(crop_file): - lines = open(crop_file,'r').readlines() - if len(lines)!=6: - return "crop.txt should contain 6 lines" - lines = [int(x) for x in lines] - self.crop = lines[:4] - input_size = tuple(lines[4:6]) - if input_size!=self.image_size: - return "input-size is inconsistant with onnx model input, %s vs %s"%(input_size, self.image_size) - - self.model_size_mb = os.path.getsize(self.model_file) / float(1024*1024) - if self.model_size_mb > max_model_size_mb: - return "max model size exceed, given %.3f-MB"%self.model_size_mb - - input_mean = None - input_std = None - if track=='cfat': - pn_file = osp.join(self.model_path, 'pixel_norm.txt') - if osp.exists(pn_file): - lines = open(pn_file,'r').readlines() - if len(lines)!=2: - return "pixel_norm.txt should contain 2 lines" - input_mean = float(lines[0]) - input_std = float(lines[1]) - if input_mean is not None or input_std is not None: - if input_mean is None or input_std is None: - return "please set input_mean and input_std simultaneously" - else: - find_sub = False - find_mul = False - for nid, node in enumerate(graph.node[:8]): - print(nid, node.name) - if node.name.startswith('Sub') or node.name.startswith('_minus'): - find_sub = True - if node.name.startswith('Mul') or node.name.startswith('_mul') or node.name.startswith('Div'): - find_mul = True - if find_sub and find_mul: - print("find sub and mul") - #mxnet arcface model - input_mean = 0.0 - input_std = 1.0 - else: - input_mean = 127.5 - input_std = 127.5 - self.input_mean = input_mean - self.input_std = input_std - for initn in graph.initializer: - weight_array = numpy_helper.to_array(initn) - dt = weight_array.dtype - if dt.itemsize<4: - return 'invalid weight type - (%s:%s)' % (initn.name, dt.name) - if test_img is None: - test_img = get_image('Tom_Hanks_54745') - test_img = cv2.resize(test_img, self.image_size) - else: - test_img = cv2.resize(test_img, self.image_size) - feat, cost = self.benchmark(test_img) - batch_result = self.check_batch(test_img) - batch_result_sum = float(np.sum(batch_result)) - if batch_result_sum in [float('inf'), -float('inf')] or batch_result_sum != batch_result_sum: - print(batch_result) - print(batch_result_sum) - return "batch result output contains NaN!" 
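# Remaining validation: the feature must be 2-D (batch, feat_dim),
# feat_dim must not exceed the track's limit, and the benchmarked
# inference time must stay under max_time_cost.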
- - if len(feat.shape) < 2: - return "the shape of the feature must be two, but get {}".format(str(feat.shape)) - - if feat.shape[1] > max_feat_dim: - return "max feat dim exceed, given %d"%feat.shape[1] - self.feat_dim = feat.shape[1] - cost_ms = cost*1000 - if cost_ms>max_time_cost: - return "max time cost exceed, given %.4f"%cost_ms - self.cost_ms = cost_ms - print('check stat:, model-size-mb: %.4f, feat-dim: %d, time-cost-ms: %.4f, input-mean: %.3f, input-std: %.3f'%(self.model_size_mb, self.feat_dim, self.cost_ms, self.input_mean, self.input_std)) - return None - - def check_batch(self, img): - if not isinstance(img, list): - imgs = [img, ] * 32 - if self.crop is not None: - nimgs = [] - for img in imgs: - nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :] - if nimg.shape[0] != self.image_size[1] or nimg.shape[1] != self.image_size[0]: - nimg = cv2.resize(nimg, self.image_size) - nimgs.append(nimg) - imgs = nimgs - blob = cv2.dnn.blobFromImages( - images=imgs, scalefactor=1.0 / self.input_std, size=self.image_size, - mean=(self.input_mean, self.input_mean, self.input_mean), swapRB=True) - net_out = self.session.run(self.output_names, {self.input_name: blob})[0] - return net_out - - - def meta_info(self): - return {'model-size-mb':self.model_size_mb, 'feature-dim':self.feat_dim, 'infer': self.cost_ms} - - - def forward(self, imgs): - if not isinstance(imgs, list): - imgs = [imgs] - input_size = self.image_size - if self.crop is not None: - nimgs = [] - for img in imgs: - nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:] - if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]: - nimg = cv2.resize(nimg, input_size) - nimgs.append(nimg) - imgs = nimgs - blob = cv2.dnn.blobFromImages(imgs, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True) - net_out = self.session.run(self.output_names, {self.input_name : blob})[0] - return net_out - - def benchmark(self, img): - input_size = self.image_size - if self.crop is not None: - nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:] - if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]: - nimg = cv2.resize(nimg, input_size) - img = nimg - blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True) - costs = [] - for _ in range(50): - ta = datetime.datetime.now() - net_out = self.session.run(self.output_names, {self.input_name : blob})[0] - tb = datetime.datetime.now() - cost = (tb-ta).total_seconds() - costs.append(cost) - costs = sorted(costs) - cost = costs[5] - return net_out, cost - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='') - # general - parser.add_argument('workdir', help='submitted work dir', type=str) - parser.add_argument('--track', help='track name, for different challenge', type=str, default='cfat') - args = parser.parse_args() - handler = ArcFaceORT(args.workdir) - err = handler.check(args.track) - print('err:', err) diff --git a/spaces/kevinwang676/VITS2-Mandarin/models.py b/spaces/kevinwang676/VITS2-Mandarin/models.py deleted file mode 100644 index b8faa22acef8d289f3a266a3c84bd726dff72339..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VITS2-Mandarin/models.py +++ /dev/null @@ -1,1271 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import 
Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - -from pqmf import PQMF -from stft import TorchSTFT - -AVAILABLE_FLOW_TYPES = ["pre_conv", "fft", "mono_layer_inter_residual", "mono_layer_post_residual"] - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - 
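# Deterministic duration predictor from the original VITS: two
# conv -> LayerNorm -> dropout blocks followed by a 1x1 projection
# to a single log-duration per frame.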
self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class DurationDiscriminator(nn.Module): # vits2 - # TODO : not using "spk conditioning" for now according to the paper. - # Can be a better discriminator if we use it. - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - # self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - # self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d(2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - # if gin_channels != 0: - # self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential( - nn.Linear(filter_channels, 1), - nn.Sigmoid() - ) - - def forward_probability(self, x, x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - # x = torch.relu(x) - # x = self.pre_out_norm_1(x) - # x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - # x = torch.relu(x) - # x = self.pre_out_norm_2(x) - # x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - # if g is not None: - # g = torch.detach(g) - # x = x + self.cond(g) - x = self.conv_1(x * x_mask) - # x = torch.relu(x) - # x = self.norm_1(x) - # x = self.drop(x) - x = self.conv_2(x * x_mask) - # x = torch.relu(x) - # x = self.norm_2(x) - # x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - 
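# vits2: gin_channels is passed through to the transformer encoder
# below, so the text encoder itself can be conditioned on the speaker
# embedding g (see forward).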
self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingTransformersLayer(nn.Module): # vits2 - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - # vits2 - self.pre_transformer = attentions.Encoder( - self.half_channels, - self.half_channels, - n_heads=2, - n_layers=2, - kernel_size=3, - p_dropout=0.1, - window_size=None - ) - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - # vits2 - self.post_transformer = attentions.Encoder( - self.hidden_channels, - self.hidden_channels, - n_heads=2, - n_layers=2, - kernel_size=3, - p_dropout=0.1, - window_size=None - ) - - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - x0_ = self.pre_transformer(x0 * x_mask, x_mask) # vits2 - x0_ = x0_ + x0 # vits2 residual connection - h = self.pre(x0_) * x_mask # changed from x0 to x0_ to retain x0 for the flow - h = self.enc(h, x_mask, g=g) - - # vits2 - (experimental;uncomment the following 2 line to use) - # h_ = self.post_transformer(h, x_mask) - # h = h + h_ #vits2 residual connection - - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class FFTransformerCouplingLayer(nn.Module): # vits2 - def __init__(self, - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout=0, - filter_channels=768, - mean_only=False, - gin_channels=0 - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = 
n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - isflow=True, - gin_channels=gin_channels - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h_ = self.enc(h, x_mask, g=g) - h = h_ + h - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class MonoTransformerFlowLayer(nn.Module): # vits2 - def __init__( - self, - channels, - hidden_channels, - mean_only=False, - residual_connection=False, - # according to VITS-2 paper fig 1B set residual_connection=True - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.half_channels = channels // 2 - self.mean_only = mean_only - self.residual_connection = residual_connection - # vits2 - self.pre_transformer = attentions.Encoder( - self.half_channels, - self.half_channels, - n_heads=2, - n_layers=2, - kernel_size=3, - p_dropout=0.1, - window_size=None - ) - - self.post = nn.Conv1d(self.half_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - if self.residual_connection: - if not reverse: - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - x0_ = x0 * x_mask - x0_ = self.pre_transformer(x0, x_mask) # vits2 - stats = self.post(x0_) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - x1 = m + x1 * torch.exp(logs) * x_mask - x_ = torch.cat([x0, x1], 1) - x = x + x_ - logdet = torch.sum(torch.log(torch.exp(logs) + 1), [1, 2]) - logdet = logdet + torch.log(torch.tensor(2)) * (x0.shape[1] * x0.shape[2]) - return x, logdet - - else: - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - x0 = x0 / 2 - x0_ = x0 * x_mask - x0_ = self.pre_transformer(x0, x_mask) # vits2 - stats = self.post(x0_) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - x1_ = ((x1 - m) / (1 + torch.exp(-logs))) * x_mask - x = torch.cat([x0, x1_], 1) - return x - else: - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - x0_ = self.pre_transformer(x0 * x_mask, x_mask) # vits2 - h = x0_ + x0 # vits2 - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x_ = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ResidualCouplingTransformersBlock(nn.Module): # vits2 - def __init__(self, - 
channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - use_transformer_flows=False, - transformer_flow_type="pre_conv", - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - if use_transformer_flows: - if transformer_flow_type == "pre_conv": - for i in range(n_flows): - self.flows.append( - ResidualCouplingTransformersLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True - ) - ) - self.flows.append(modules.Flip()) - elif transformer_flow_type == "fft": - for i in range(n_flows): - self.flows.append( - FFTransformerCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True - ) - ) - self.flows.append(modules.Flip()) - elif transformer_flow_type == "mono_layer_inter_residual": - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True - ) - ) - self.flows.append(modules.Flip()) - self.flows.append( - MonoTransformerFlowLayer( - channels, hidden_channels, mean_only=True - ) - ) - elif transformer_flow_type == "mono_layer_post_residual": - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - self.flows.append( - MonoTransformerFlowLayer( - channels, hidden_channels, mean_only=True, - residual_connection=True - ) - ) - else: - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - 
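# Posterior encoder q(z|y): a 1x1 pre-convolution, a WaveNet-style WN
# stack over spectrogram frames, then a projection to per-frame mean
# and log-scale from which z is sampled in forward().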
self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class iSTFT_Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, gin_channels=0): - super(iSTFT_Generator, self).__init__() - # self.h = h - self.gen_istft_n_fft = gen_istft_n_fft - self.gen_istft_hop_size = gen_istft_hop_size - - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = weight_norm(Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), 
upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.post_n_fft = self.gen_istft_n_fft - self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.reflection_pad = torch.nn.ReflectionPad1d((1, 0)) - self.stft = TorchSTFT(filter_length=self.gen_istft_n_fft, hop_length=self.gen_istft_hop_size, - win_length=self.gen_istft_n_fft) - - def forward(self, x, g=None): - - x = self.conv_pre(x) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.reflection_pad(x) - x = self.conv_post(x) - spec = torch.exp(x[:, :self.post_n_fft // 2 + 1, :]) - phase = math.pi * torch.sin(x[:, self.post_n_fft // 2 + 1:, :]) - out = self.stft.inverse(spec, phase).to(x.device) - return out, None - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class Multiband_iSTFT_Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, subbands, - gin_channels=0): - super(Multiband_iSTFT_Generator, self).__init__() - # self.h = h - self.subbands = subbands - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = weight_norm(Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.post_n_fft = gen_istft_n_fft - self.ups.apply(init_weights) - self.reflection_pad = torch.nn.ReflectionPad1d((1, 0)) - self.reshape_pixelshuffle = [] - - self.subband_conv_post = weight_norm(Conv1d(ch, self.subbands * (self.post_n_fft + 2), 7, 1, padding=3)) - - self.subband_conv_post.apply(init_weights) - - self.gen_istft_n_fft = gen_istft_n_fft - self.gen_istft_hop_size = gen_istft_hop_size - - def forward(self, x, g=None): - stft = TorchSTFT(filter_length=self.gen_istft_n_fft, hop_length=self.gen_istft_hop_size, - win_length=self.gen_istft_n_fft).to(x.device) - pqmf = PQMF(x.device) - - x = self.conv_pre(x) # [B, ch, length] - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = 
self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - - x = F.leaky_relu(x) - x = self.reflection_pad(x) - x = self.subband_conv_post(x) - x = torch.reshape(x, (x.shape[0], self.subbands, x.shape[1] // self.subbands, x.shape[-1])) - - spec = torch.exp(x[:, :, :self.post_n_fft // 2 + 1, :]) - phase = math.pi * torch.sin(x[:, :, self.post_n_fft // 2 + 1:, :]) - - y_mb_hat = stft.inverse( - torch.reshape(spec, (spec.shape[0] * self.subbands, self.gen_istft_n_fft // 2 + 1, spec.shape[-1])), - torch.reshape(phase, (phase.shape[0] * self.subbands, self.gen_istft_n_fft // 2 + 1, phase.shape[-1]))) - y_mb_hat = torch.reshape(y_mb_hat, (x.shape[0], self.subbands, 1, y_mb_hat.shape[-1])) - y_mb_hat = y_mb_hat.squeeze(-2) - - y_g_hat = pqmf.synthesis(y_mb_hat) - - return y_g_hat, y_mb_hat - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class Multistream_iSTFT_Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size, subbands, - gin_channels=0): - super(Multistream_iSTFT_Generator, self).__init__() - # self.h = h - self.subbands = subbands - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = weight_norm(Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.post_n_fft = gen_istft_n_fft - self.ups.apply(init_weights) - self.reflection_pad = torch.nn.ReflectionPad1d((1, 0)) - self.reshape_pixelshuffle = [] - - self.subband_conv_post = weight_norm(Conv1d(ch, self.subbands * (self.post_n_fft + 2), 7, 1, padding=3)) - - self.subband_conv_post.apply(init_weights) - - self.gen_istft_n_fft = gen_istft_n_fft - self.gen_istft_hop_size = gen_istft_hop_size - - updown_filter = torch.zeros((self.subbands, self.subbands, self.subbands)).float() - for k in range(self.subbands): - updown_filter[k, k, 0] = 1.0 - self.register_buffer("updown_filter", updown_filter) - #self.multistream_conv_post = weight_norm(Conv1d(4, 1, kernel_size=63, bias=False, padding=get_padding(63, 1))) - self.multistream_conv_post = weight_norm(Conv1d(self.subbands, 1, kernel_size=63, bias=False, padding=get_padding(63, 1))) # from MB-iSTFT-VITS-44100-Ja - self.multistream_conv_post.apply(init_weights) - - def forward(self, x, g=None): - stft = TorchSTFT(filter_length=self.gen_istft_n_fft, hop_length=self.gen_istft_hop_size, - win_length=self.gen_istft_n_fft).to(x.device) - # pqmf = PQMF(x.device) - - x = self.conv_pre(x) # [B, ch, length] - - for i in range(self.num_upsamples): - - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = 
self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - - x = F.leaky_relu(x) - x = self.reflection_pad(x) - x = self.subband_conv_post(x) - x = torch.reshape(x, (x.shape[0], self.subbands, x.shape[1] // self.subbands, x.shape[-1])) - - spec = torch.exp(x[:, :, :self.post_n_fft // 2 + 1, :]) - phase = math.pi * torch.sin(x[:, :, self.post_n_fft // 2 + 1:, :]) - - y_mb_hat = stft.inverse( - torch.reshape(spec, (spec.shape[0] * self.subbands, self.gen_istft_n_fft // 2 + 1, spec.shape[-1])), - torch.reshape(phase, (phase.shape[0] * self.subbands, self.gen_istft_n_fft // 2 + 1, phase.shape[-1]))) - y_mb_hat = torch.reshape(y_mb_hat, (x.shape[0], self.subbands, 1, y_mb_hat.shape[-1])) - y_mb_hat = y_mb_hat.squeeze(-2) - - #y_mb_hat = F.conv_transpose1d(y_mb_hat, self.updown_filter.cuda(x.device) * self.subbands, stride=self.subbands) - y_mb_hat = F.conv_transpose1d(y_mb_hat, self.updown_filter.to(x.device) * self.subbands, stride=self.subbands) - - y_g_hat = self.multistream_conv_post(y_mb_hat) - - return y_g_hat, y_mb_hat - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, 
use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gen_istft_n_fft, - gen_istft_hop_size, - n_speakers=0, - gin_channels=0, - use_sdp=True, - ms_istft_vits=False, - mb_istft_vits=False, - subbands=False, - istft_vits=False, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.ms_istft_vits = ms_istft_vits - self.mb_istft_vits = mb_istft_vits - self.istft_vits = istft_vits - self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", False) - self.use_transformer_flows = kwargs.get("use_transformer_flows", False) - self.transformer_flow_type = kwargs.get("transformer_flow_type", "mono_layer_post_residual") - if self.use_transformer_flows: - assert self.transformer_flow_type in AVAILABLE_FLOW_TYPES, f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}" - self.use_sdp = use_sdp - # self.use_duration_discriminator = kwargs.get("use_duration_discriminator", False) - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - else: - self.enc_gin_channels = 0 - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels) - - if mb_istft_vits == True: - print('Multi-band iSTFT VITS2') - self.dec = Multiband_iSTFT_Generator(inter_channels, resblock, resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, upsample_initial_channel, upsample_kernel_sizes, - gen_istft_n_fft, gen_istft_hop_size, subbands, - gin_channels=gin_channels) - elif ms_istft_vits == True: - print('Multi-stream iSTFT VITS2') - self.dec =
Multistream_iSTFT_Generator(inter_channels, resblock, resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, upsample_initial_channel, upsample_kernel_sizes, - gen_istft_n_fft, gen_istft_hop_size, subbands, - gin_channels=gin_channels) - elif istft_vits == True: - print('iSTFT-VITS2') - self.dec = iSTFT_Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, - upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gen_istft_n_fft, - gen_istft_hop_size, gin_channels=gin_channels) - else: - print('No iSTFT arguments found in json file') - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) # vits 2 - - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.flow = ResidualCouplingTransformersBlock( - inter_channels, - hidden_channels, - 5, - 1, - 4, - gin_channels=gin_channels, - use_transformer_flows=self.use_transformer_flows, - transformer_flow_type=self.transformer_flow_type - ) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, g=g) # vits2? - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - if self.use_noise_scaled_mas: - epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.) 
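The neg_cent1 through neg_cent4 terms computed just above are an algebraic expansion of the diagonal-Gaussian log-density of z_p under N(m_p, exp(logs_p)^2), summed over channels: expanding the quadratic term lets the full [b, t_t, t_s] pairwise score matrix be assembled from two matmuls plus two broadcast sums instead of a loop over every frame/text position pair. A small self-contained check of that algebra (the shapes here are made up for the demo):

```python
import math

import torch

torch.manual_seed(0)
b, d, t_t, t_s = 2, 8, 11, 7  # batch, channels, frame steps, text steps (made up)
z_p = torch.randn(b, d, t_t)
m_p = torch.randn(b, d, t_s)
logs_p = 0.1 * torch.randn(b, d, t_s)

# Vectorized form, exactly as in the forward pass above.
s_p_sq_r = torch.exp(-2 * logs_p)  # 1 / sigma^2
neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)
neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r)
neg_cent3 = torch.matmul(z_p.transpose(1, 2), m_p * s_p_sq_r)
neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)
neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4  # [b, t_t, t_s]

# Direct evaluation: sum_d log N(z_p[:, :, i]; m_p[:, :, j], exp(logs_p[:, :, j])^2).
direct = torch.empty(b, t_t, t_s)
for i in range(t_t):
    for j in range(t_s):
        diff = z_p[:, :, i] - m_p[:, :, j]
        direct[:, i, j] = torch.sum(
            -0.5 * math.log(2 * math.pi) - logs_p[:, :, j]
            - 0.5 * diff ** 2 * s_p_sq_r[:, :, j], dim=1)
print(torch.allclose(neg_cent, direct, atol=1e-4))  # True: same score matrix
```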
- logw_ = torch.log(w + 1e-6) * x_mask - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o, o_mb = self.dec(z_slice, g=g) - return o, o_mb, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, g=g) - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - - o, o_mb = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, o_mb, attn, y_mask, (z, z_p, m_p, logs_p) - - -''' ## (obsolete) currently vits-2 is not capable of voice conversion - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): # not used anywhere - assert self.n_speakers > 0, "n_speakers have to be larger than 0."
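The disabled voice_conversion in this quoted-out block relies on the flow being both invertible and speaker-conditioned: encoding with the source embedding and running the flow forward under g_src maps the posterior toward the shared prior space, and running the flow in reverse under g_tgt re-introduces the target speaker before decoding. A toy sketch of that round trip, where toy_flow is a hypothetical stand-in rather than this repo's flow:

```python
import torch


def toy_flow(z, g, reverse=False):
    # Hypothetical stand-in for self.flow: an invertible, speaker-conditioned map.
    scale = 1.0 + 0.1 * g
    return z / scale if reverse else z * scale


g_src, g_tgt = torch.tensor(1.0), torch.tensor(3.0)  # pretend speaker embeddings
z = torch.randn(2, 4, 16)                   # pretend posterior sample, enc_q(y, g=g_src)
z_p = toy_flow(z, g_src)                    # forward flow under the source speaker
z_hat = toy_flow(z_p, g_tgt, reverse=True)  # inverse flow under the target speaker
print(z_hat.shape)  # torch.Size([2, 4, 16]); would feed dec(z_hat * y_mask, g=g_tgt)
```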
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat, o_hat_mb = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, o_hat_mb, y_mask, (z, z_p, z_hat) -''' diff --git a/spaces/kittyposter12/Dungeons-and-Diffusion/README.md b/spaces/kittyposter12/Dungeons-and-Diffusion/README.md deleted file mode 100644 index e1564714c53dfefb2039892de7b2dc49d7d6a99d..0000000000000000000000000000000000000000 --- a/spaces/kittyposter12/Dungeons-and-Diffusion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dungeons And Diffusion -emoji: 🐢 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.10.0 -app_file: app.py -pinned: false -duplicated_from: akhaliq/Dungeons-and-Diffusion --- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/krystaltechnology/image-video-colorization/pages/03_B&W_Images_Colorizer.py b/spaces/krystaltechnology/image-video-colorization/pages/03_B&W_Images_Colorizer.py deleted file mode 100644 index 045e25f8d2218da3d3a40d2d71354b61bc8a4299..0000000000000000000000000000000000000000 --- a/spaces/krystaltechnology/image-video-colorization/pages/03_B&W_Images_Colorizer.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -import zipfile - -import streamlit as st -from PIL import Image -from streamlit_lottie import st_lottie - -from models.deep_colorization.colorizers import eccv16 -from utils import colorize_image, change_model, load_lottieurl - -#st.set_page_config(page_title="Image & Video Colorizer", page_icon="🎨", layout="wide") - -st.title("B&W Images Colorizer") - - -loaded_model = eccv16(pretrained=True).eval() -current_model = "None" - -st.write(""" - ##### Input a black and white image and get a colorized version of it. - ###### ➠ If you want to colorize multiple images just upload them all at once. - ###### ➠ Uploading already colored images won't raise errors but images won't look good.""") - - -def main(): - model = st.selectbox( - "Select Model (Both models have their pros and cons, I recommend trying both and keeping the best for your task)", - ["ECCV16", "SIGGRAPH17"], index=0) - - # Make the user select a model - loaded_model = change_model(current_model, model) - st.write(f"Model is now {model}") - - # Ask the user if they want to see the colorization in real time - display_results = st.checkbox('Display results in real time', value=True) - - # Input for the user to upload images - uploaded_file = st.file_uploader("Upload your images here...", type=['jpg', 'png', 'jpeg'], - accept_multiple_files=True) - - # If the user clicks on the button - if st.button("Colorize"): - # If the user uploaded images - if uploaded_file is not None: - if display_results: - col1, col2 = st.columns([0.5, 0.5]) - with col1: - st.markdown('<p style="text-align: center;">Before</p>
', unsafe_allow_html=True) - with col2: - st.markdown('<p style="text-align: center;">After</p>
      ', unsafe_allow_html=True) - else: - col1, col2, col3 = st.columns(3) - - for i, file in enumerate(uploaded_file): - file_extension = os.path.splitext(file.name)[1].lower() - if file_extension in ['.jpg', '.png', '.jpeg']: - image = Image.open(file) - if display_results: - with col1: - st.image(image, use_column_width="always") - with col2: - with st.spinner("Colorizing image..."): - out_img, new_img = colorize_image(file, loaded_model) - new_img.save("IMG_" + str(i+1) + ".jpg") - st.image(out_img, use_column_width="always") - - else: - out_img, new_img = colorize_image(file, loaded_model) - new_img.save("IMG_" + str(i+1) + ".jpg") - - if len(uploaded_file) > 1: - # Create a zip file - zip_filename = "colorized_images.zip" - with zipfile.ZipFile(zip_filename, "w") as zip_file: - # Add colorized images to the zip file - for i in range(len(uploaded_file)): - zip_file.write("IMG_" + str(i + 1) + ".jpg", "IMG_" + str(i) + ".jpg") - with col2: - # Provide the zip file data for download - st.download_button( - label="Download Colorized Images" if len(uploaded_file) > 1 else "Download Colorized Image", - data=open(zip_filename, "rb").read(), - file_name=zip_filename, - ) - else: - with col2: - st.download_button( - label="Download Colorized Image", - data=open("IMG_1.jpg", "rb").read(), - file_name="IMG_1.jpg", - ) - - else: - st.warning('Upload a file', icon="⚠️") - - -if __name__ == "__main__": - main() diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b6262459.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b6262459.css deleted file mode 100644 index fdf7b094f97f8dfedf79266688be78104c02edfc..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b6262459.css +++ /dev/null @@ -1 +0,0 @@ -input.svelte-q8uklq{position:absolute;top:var(--size-2);right:var(--size-2);bottom:var(--size-2);left:var(--size-2);flex:1 1 0%;transform:translate(-.1px);outline:none;border:none;background:transparent}span.svelte-q8uklq{flex:1 1 0%;outline:none;padding:var(--size-2)}.header.svelte-q8uklq{transform:translate(0);font:var(--weight-bold)}.edit.svelte-q8uklq{opacity:0;pointer-events:none}.button-wrap.svelte-8hrj8a:hover svg.svelte-8hrj8a.svelte-8hrj8a{color:var(--color-accent)}.button-wrap.svelte-8hrj8a svg.svelte-8hrj8a.svelte-8hrj8a{margin-right:var(--size-1);margin-left:-5px}.label.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{margin-top:var(--size-6)}.label.svelte-8hrj8a p.svelte-8hrj8a.svelte-8hrj8a{position:relative;z-index:var(--layer-4);margin-bottom:var(--size-2);color:var(--block-label-text-color);font-size:var(--block-label-text-size)}.table-wrap.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{position:relative;transition:.15s;border:1px solid 
var(--border-color-primary);border-radius:var(--table-radius);overflow-x:scroll;overflow-y:hidden}.dragging.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{border-color:var(--color-accent)}.no-wrap.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{white-space:nowrap}table.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{transition:.15s;width:var(--size-full);table-layout:auto;overflow:hidden;color:var(--body-text-color);font-size:var(--input-text-size);line-height:var(--line-md);font-family:var(--font-mono)}table.dragging.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{opacity:.4}thead.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{position:sticky;top:0;left:0;z-index:var(--layer-1);box-shadow:var(--shadow-drop)}tr.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{border-bottom:1px solid var(--border-color-primary);text-align:left}tr.svelte-8hrj8a>.svelte-8hrj8a+.svelte-8hrj8a{border-right-width:0px;border-left-width:1px;border-style:solid;border-color:var(--border-color-primary)}th.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a,td.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{--ring-color:transparent;position:relative;outline:none;box-shadow:inset 0 0 0 1px var(--ring-color);padding:0}th.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a:first-child{border-top-left-radius:var(--table-radius)}th.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a:last-child{border-top-right-radius:var(--table-radius)}th.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a:focus-within,td.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a:focus-within{--ring-color:var(--color-accent)}tr.svelte-8hrj8a:last-child td.svelte-8hrj8a.svelte-8hrj8a:first-child{border-bottom-left-radius:var(--table-radius)}tr.svelte-8hrj8a:last-child td.svelte-8hrj8a.svelte-8hrj8a:last-child{border-bottom-right-radius:var(--table-radius)}tr.svelte-8hrj8a th.svelte-8hrj8a.svelte-8hrj8a{background:var(--table-even-background-fill)}th.svelte-8hrj8a svg.svelte-8hrj8a.svelte-8hrj8a{fill:currentColor;font-size:10px}.sort-button.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{display:flex;flex:none;justify-content:center;align-items:center;transition:.15s;cursor:pointer;padding:var(--size-2);color:var(--body-text-color-subdued);line-height:var(--text-sm)}.sort-button.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a:hover{color:var(--body-text-color)}.des.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{transform:scaleY(-1)}.sort-button.sorted.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{color:var(--color-accent)}tbody.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{overflow-y:scroll}tbody.svelte-8hrj8a>tr.svelte-8hrj8a.svelte-8hrj8a:last-child{border:none}tbody.svelte-8hrj8a>tr.svelte-8hrj8a.svelte-8hrj8a:nth-child(even){background:var(--table-even-background-fill)}tbody.svelte-8hrj8a>tr.svelte-8hrj8a.svelte-8hrj8a:nth-child(odd){background:var(--table-odd-background-fill)}tbody.svelte-8hrj8a>tr.svelte-8hrj8a.svelte-8hrj8a:nth-child(odd):focus{background:var(--background-fill-primary)}.editing.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{background:var(--table-editing)}.cell-wrap.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{display:flex;align-items:center;outline:none;height:var(--size-full);min-height:var(--size-9)}.controls-wrap.svelte-8hrj8a.svelte-8hrj8a.svelte-8hrj8a{display:flex;justify-content:flex-end;padding-top:var(--size-2)}.controls-wrap.svelte-8hrj8a>.svelte-8hrj8a+.svelte-8hrj8a{margin-left:var(--size-1)}div.svelte-1nw9bhs{position:relative;overflow:hidden}.hide.svelte-1nw9bhs{display:none} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-03d58ab8.css 
b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-03d58ab8.css deleted file mode 100644 index c02568c42d3cf011dc008a256fdece5721dbccab..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-03d58ab8.css +++ /dev/null @@ -1 +0,0 @@ -.hide.svelte-ydeks8{display:none} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_block/heading.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_block/heading.py deleted file mode 100644 index 064d0702a56587a65a5710ad599bd264cc1db568..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_block/heading.py +++ /dev/null @@ -1,71 +0,0 @@ -""" Atex heading (#, ##, ...) """ -from __future__ import annotations - -import logging - -from ..common.utils import isSpace -from .state_block import StateBlock - -LOGGER = logging.getLogger(__name__) - - -def heading(state: StateBlock, startLine: int, endLine: int, silent: bool): - LOGGER.debug("entering heading: %s, %s, %s, %s", state, startLine, endLine, silent) - - pos = state.bMarks[startLine] + state.tShift[startLine] - maximum = state.eMarks[startLine] - - # if it's indented more than 3 spaces, it should be a code block - if state.sCount[startLine] - state.blkIndent >= 4: - return False - - ch: int | None = state.srcCharCode[pos] - - # /* # */ - if ch != 0x23 or pos >= maximum: - return False - - # count heading level - level = 1 - pos += 1 - try: - ch = state.srcCharCode[pos] - except IndexError: - ch = None - # /* # */ - while ch == 0x23 and pos < maximum and level <= 6: - level += 1 - pos += 1 - try: - ch = state.srcCharCode[pos] - except IndexError: - ch = None - - if level > 6 or (pos < maximum and not isSpace(ch)): - return False - - if silent: - return True - - # Let's cut tails like ' ### ' from the end of string - - maximum = state.skipSpacesBack(maximum, pos) - tmp = state.skipCharsBack(maximum, 0x23, pos) # # - if tmp > pos and isSpace(state.srcCharCode[tmp - 1]): - maximum = tmp - - state.line = startLine + 1 - - token = state.push("heading_open", "h" + str(level), 1) - token.markup = "########"[:level] - token.map = [startLine, state.line] - - token = state.push("inline", "", 0) - token.content = state.src[pos:maximum].strip() - token.map = [startLine, state.line] - token.children = [] - - token = state.push("heading_close", "h" + str(level), -1) - token.markup = "########"[:level] - - return True diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/_color_data.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/_color_data.py deleted file mode 100644 index 44f97adbb76aeaec2578cedfe60219a3278fd2ca..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/_color_data.py +++ /dev/null @@ -1,1141 +0,0 @@ -BASE_COLORS = { - 'b': (0, 0, 1), # blue - 'g': (0, 0.5, 0), # green - 'r': (1, 0, 0), # red - 'c': (0, 0.75, 0.75), # cyan - 'm': (0.75, 0, 0.75), # magenta - 'y': (0.75, 0.75, 0), # yellow - 'k': (0, 0, 0), # black - 'w': (1, 1, 1), # white -} - - -# These colors are from Tableau -TABLEAU_COLORS = { - 'tab:blue': '#1f77b4', - 'tab:orange': '#ff7f0e', - 'tab:green': '#2ca02c', - 'tab:red': '#d62728', - 'tab:purple': '#9467bd', - 
'tab:brown': '#8c564b', - 'tab:pink': '#e377c2', - 'tab:gray': '#7f7f7f', - 'tab:olive': '#bcbd22', - 'tab:cyan': '#17becf', -} - - -# This mapping of color names -> hex values is taken from -# a survey run by Randall Munroe see: -# https://blog.xkcd.com/2010/05/03/color-survey-results/ -# for more details. The results are hosted at -# https://xkcd.com/color/rgb/ -# and also available as a text file at -# https://xkcd.com/color/rgb.txt -# -# License: https://creativecommons.org/publicdomain/zero/1.0/ -XKCD_COLORS = { - 'cloudy blue': '#acc2d9', - 'dark pastel green': '#56ae57', - 'dust': '#b2996e', - 'electric lime': '#a8ff04', - 'fresh green': '#69d84f', - 'light eggplant': '#894585', - 'nasty green': '#70b23f', - 'really light blue': '#d4ffff', - 'tea': '#65ab7c', - 'warm purple': '#952e8f', - 'yellowish tan': '#fcfc81', - 'cement': '#a5a391', - 'dark grass green': '#388004', - 'dusty teal': '#4c9085', - 'grey teal': '#5e9b8a', - 'macaroni and cheese': '#efb435', - 'pinkish tan': '#d99b82', - 'spruce': '#0a5f38', - 'strong blue': '#0c06f7', - 'toxic green': '#61de2a', - 'windows blue': '#3778bf', - 'blue blue': '#2242c7', - 'blue with a hint of purple': '#533cc6', - 'booger': '#9bb53c', - 'bright sea green': '#05ffa6', - 'dark green blue': '#1f6357', - 'deep turquoise': '#017374', - 'green teal': '#0cb577', - 'strong pink': '#ff0789', - 'bland': '#afa88b', - 'deep aqua': '#08787f', - 'lavender pink': '#dd85d7', - 'light moss green': '#a6c875', - 'light seafoam green': '#a7ffb5', - 'olive yellow': '#c2b709', - 'pig pink': '#e78ea5', - 'deep lilac': '#966ebd', - 'desert': '#ccad60', - 'dusty lavender': '#ac86a8', - 'purpley grey': '#947e94', - 'purply': '#983fb2', - 'candy pink': '#ff63e9', - 'light pastel green': '#b2fba5', - 'boring green': '#63b365', - 'kiwi green': '#8ee53f', - 'light grey green': '#b7e1a1', - 'orange pink': '#ff6f52', - 'tea green': '#bdf8a3', - 'very light brown': '#d3b683', - 'egg shell': '#fffcc4', - 'eggplant purple': '#430541', - 'powder pink': '#ffb2d0', - 'reddish grey': '#997570', - 'baby shit brown': '#ad900d', - 'liliac': '#c48efd', - 'stormy blue': '#507b9c', - 'ugly brown': '#7d7103', - 'custard': '#fffd78', - 'darkish pink': '#da467d', - 'deep brown': '#410200', - 'greenish beige': '#c9d179', - 'manilla': '#fffa86', - 'off blue': '#5684ae', - 'battleship grey': '#6b7c85', - 'browny green': '#6f6c0a', - 'bruise': '#7e4071', - 'kelley green': '#009337', - 'sickly yellow': '#d0e429', - 'sunny yellow': '#fff917', - 'azul': '#1d5dec', - 'darkgreen': '#054907', - 'green/yellow': '#b5ce08', - 'lichen': '#8fb67b', - 'light light green': '#c8ffb0', - 'pale gold': '#fdde6c', - 'sun yellow': '#ffdf22', - 'tan green': '#a9be70', - 'burple': '#6832e3', - 'butterscotch': '#fdb147', - 'toupe': '#c7ac7d', - 'dark cream': '#fff39a', - 'indian red': '#850e04', - 'light lavendar': '#efc0fe', - 'poison green': '#40fd14', - 'baby puke green': '#b6c406', - 'bright yellow green': '#9dff00', - 'charcoal grey': '#3c4142', - 'squash': '#f2ab15', - 'cinnamon': '#ac4f06', - 'light pea green': '#c4fe82', - 'radioactive green': '#2cfa1f', - 'raw sienna': '#9a6200', - 'baby purple': '#ca9bf7', - 'cocoa': '#875f42', - 'light royal blue': '#3a2efe', - 'orangeish': '#fd8d49', - 'rust brown': '#8b3103', - 'sand brown': '#cba560', - 'swamp': '#698339', - 'tealish green': '#0cdc73', - 'burnt siena': '#b75203', - 'camo': '#7f8f4e', - 'dusk blue': '#26538d', - 'fern': '#63a950', - 'old rose': '#c87f89', - 'pale light green': '#b1fc99', - 'peachy pink': '#ff9a8a', - 'rosy pink': '#f6688e', - 
'light bluish green': '#76fda8', - 'light bright green': '#53fe5c', - 'light neon green': '#4efd54', - 'light seafoam': '#a0febf', - 'tiffany blue': '#7bf2da', - 'washed out green': '#bcf5a6', - 'browny orange': '#ca6b02', - 'nice blue': '#107ab0', - 'sapphire': '#2138ab', - 'greyish teal': '#719f91', - 'orangey yellow': '#fdb915', - 'parchment': '#fefcaf', - 'straw': '#fcf679', - 'very dark brown': '#1d0200', - 'terracota': '#cb6843', - 'ugly blue': '#31668a', - 'clear blue': '#247afd', - 'creme': '#ffffb6', - 'foam green': '#90fda9', - 'grey/green': '#86a17d', - 'light gold': '#fddc5c', - 'seafoam blue': '#78d1b6', - 'topaz': '#13bbaf', - 'violet pink': '#fb5ffc', - 'wintergreen': '#20f986', - 'yellow tan': '#ffe36e', - 'dark fuchsia': '#9d0759', - 'indigo blue': '#3a18b1', - 'light yellowish green': '#c2ff89', - 'pale magenta': '#d767ad', - 'rich purple': '#720058', - 'sunflower yellow': '#ffda03', - 'green/blue': '#01c08d', - 'leather': '#ac7434', - 'racing green': '#014600', - 'vivid purple': '#9900fa', - 'dark royal blue': '#02066f', - 'hazel': '#8e7618', - 'muted pink': '#d1768f', - 'booger green': '#96b403', - 'canary': '#fdff63', - 'cool grey': '#95a3a6', - 'dark taupe': '#7f684e', - 'darkish purple': '#751973', - 'true green': '#089404', - 'coral pink': '#ff6163', - 'dark sage': '#598556', - 'dark slate blue': '#214761', - 'flat blue': '#3c73a8', - 'mushroom': '#ba9e88', - 'rich blue': '#021bf9', - 'dirty purple': '#734a65', - 'greenblue': '#23c48b', - 'icky green': '#8fae22', - 'light khaki': '#e6f2a2', - 'warm blue': '#4b57db', - 'dark hot pink': '#d90166', - 'deep sea blue': '#015482', - 'carmine': '#9d0216', - 'dark yellow green': '#728f02', - 'pale peach': '#ffe5ad', - 'plum purple': '#4e0550', - 'golden rod': '#f9bc08', - 'neon red': '#ff073a', - 'old pink': '#c77986', - 'very pale blue': '#d6fffe', - 'blood orange': '#fe4b03', - 'grapefruit': '#fd5956', - 'sand yellow': '#fce166', - 'clay brown': '#b2713d', - 'dark blue grey': '#1f3b4d', - 'flat green': '#699d4c', - 'light green blue': '#56fca2', - 'warm pink': '#fb5581', - 'dodger blue': '#3e82fc', - 'gross green': '#a0bf16', - 'ice': '#d6fffa', - 'metallic blue': '#4f738e', - 'pale salmon': '#ffb19a', - 'sap green': '#5c8b15', - 'algae': '#54ac68', - 'bluey grey': '#89a0b0', - 'greeny grey': '#7ea07a', - 'highlighter green': '#1bfc06', - 'light light blue': '#cafffb', - 'light mint': '#b6ffbb', - 'raw umber': '#a75e09', - 'vivid blue': '#152eff', - 'deep lavender': '#8d5eb7', - 'dull teal': '#5f9e8f', - 'light greenish blue': '#63f7b4', - 'mud green': '#606602', - 'pinky': '#fc86aa', - 'red wine': '#8c0034', - 'shit green': '#758000', - 'tan brown': '#ab7e4c', - 'darkblue': '#030764', - 'rosa': '#fe86a4', - 'lipstick': '#d5174e', - 'pale mauve': '#fed0fc', - 'claret': '#680018', - 'dandelion': '#fedf08', - 'orangered': '#fe420f', - 'poop green': '#6f7c00', - 'ruby': '#ca0147', - 'dark': '#1b2431', - 'greenish turquoise': '#00fbb0', - 'pastel red': '#db5856', - 'piss yellow': '#ddd618', - 'bright cyan': '#41fdfe', - 'dark coral': '#cf524e', - 'algae green': '#21c36f', - 'darkish red': '#a90308', - 'reddy brown': '#6e1005', - 'blush pink': '#fe828c', - 'camouflage green': '#4b6113', - 'lawn green': '#4da409', - 'putty': '#beae8a', - 'vibrant blue': '#0339f8', - 'dark sand': '#a88f59', - 'purple/blue': '#5d21d0', - 'saffron': '#feb209', - 'twilight': '#4e518b', - 'warm brown': '#964e02', - 'bluegrey': '#85a3b2', - 'bubble gum pink': '#ff69af', - 'duck egg blue': '#c3fbf4', - 'greenish cyan': '#2afeb7', - 'petrol': 
'#005f6a', - 'royal': '#0c1793', - 'butter': '#ffff81', - 'dusty orange': '#f0833a', - 'off yellow': '#f1f33f', - 'pale olive green': '#b1d27b', - 'orangish': '#fc824a', - 'leaf': '#71aa34', - 'light blue grey': '#b7c9e2', - 'dried blood': '#4b0101', - 'lightish purple': '#a552e6', - 'rusty red': '#af2f0d', - 'lavender blue': '#8b88f8', - 'light grass green': '#9af764', - 'light mint green': '#a6fbb2', - 'sunflower': '#ffc512', - 'velvet': '#750851', - 'brick orange': '#c14a09', - 'lightish red': '#fe2f4a', - 'pure blue': '#0203e2', - 'twilight blue': '#0a437a', - 'violet red': '#a50055', - 'yellowy brown': '#ae8b0c', - 'carnation': '#fd798f', - 'muddy yellow': '#bfac05', - 'dark seafoam green': '#3eaf76', - 'deep rose': '#c74767', - 'dusty red': '#b9484e', - 'grey/blue': '#647d8e', - 'lemon lime': '#bffe28', - 'purple/pink': '#d725de', - 'brown yellow': '#b29705', - 'purple brown': '#673a3f', - 'wisteria': '#a87dc2', - 'banana yellow': '#fafe4b', - 'lipstick red': '#c0022f', - 'water blue': '#0e87cc', - 'brown grey': '#8d8468', - 'vibrant purple': '#ad03de', - 'baby green': '#8cff9e', - 'barf green': '#94ac02', - 'eggshell blue': '#c4fff7', - 'sandy yellow': '#fdee73', - 'cool green': '#33b864', - 'pale': '#fff9d0', - 'blue/grey': '#758da3', - 'hot magenta': '#f504c9', - 'greyblue': '#77a1b5', - 'purpley': '#8756e4', - 'baby shit green': '#889717', - 'brownish pink': '#c27e79', - 'dark aquamarine': '#017371', - 'diarrhea': '#9f8303', - 'light mustard': '#f7d560', - 'pale sky blue': '#bdf6fe', - 'turtle green': '#75b84f', - 'bright olive': '#9cbb04', - 'dark grey blue': '#29465b', - 'greeny brown': '#696006', - 'lemon green': '#adf802', - 'light periwinkle': '#c1c6fc', - 'seaweed green': '#35ad6b', - 'sunshine yellow': '#fffd37', - 'ugly purple': '#a442a0', - 'medium pink': '#f36196', - 'puke brown': '#947706', - 'very light pink': '#fff4f2', - 'viridian': '#1e9167', - 'bile': '#b5c306', - 'faded yellow': '#feff7f', - 'very pale green': '#cffdbc', - 'vibrant green': '#0add08', - 'bright lime': '#87fd05', - 'spearmint': '#1ef876', - 'light aquamarine': '#7bfdc7', - 'light sage': '#bcecac', - 'yellowgreen': '#bbf90f', - 'baby poo': '#ab9004', - 'dark seafoam': '#1fb57a', - 'deep teal': '#00555a', - 'heather': '#a484ac', - 'rust orange': '#c45508', - 'dirty blue': '#3f829d', - 'fern green': '#548d44', - 'bright lilac': '#c95efb', - 'weird green': '#3ae57f', - 'peacock blue': '#016795', - 'avocado green': '#87a922', - 'faded orange': '#f0944d', - 'grape purple': '#5d1451', - 'hot green': '#25ff29', - 'lime yellow': '#d0fe1d', - 'mango': '#ffa62b', - 'shamrock': '#01b44c', - 'bubblegum': '#ff6cb5', - 'purplish brown': '#6b4247', - 'vomit yellow': '#c7c10c', - 'pale cyan': '#b7fffa', - 'key lime': '#aeff6e', - 'tomato red': '#ec2d01', - 'lightgreen': '#76ff7b', - 'merlot': '#730039', - 'night blue': '#040348', - 'purpleish pink': '#df4ec8', - 'apple': '#6ecb3c', - 'baby poop green': '#8f9805', - 'green apple': '#5edc1f', - 'heliotrope': '#d94ff5', - 'yellow/green': '#c8fd3d', - 'almost black': '#070d0d', - 'cool blue': '#4984b8', - 'leafy green': '#51b73b', - 'mustard brown': '#ac7e04', - 'dusk': '#4e5481', - 'dull brown': '#876e4b', - 'frog green': '#58bc08', - 'vivid green': '#2fef10', - 'bright light green': '#2dfe54', - 'fluro green': '#0aff02', - 'kiwi': '#9cef43', - 'seaweed': '#18d17b', - 'navy green': '#35530a', - 'ultramarine blue': '#1805db', - 'iris': '#6258c4', - 'pastel orange': '#ff964f', - 'yellowish orange': '#ffab0f', - 'perrywinkle': '#8f8ce7', - 'tealish': '#24bca8', - 'dark 
plum': '#3f012c', - 'pear': '#cbf85f', - 'pinkish orange': '#ff724c', - 'midnight purple': '#280137', - 'light urple': '#b36ff6', - 'dark mint': '#48c072', - 'greenish tan': '#bccb7a', - 'light burgundy': '#a8415b', - 'turquoise blue': '#06b1c4', - 'ugly pink': '#cd7584', - 'sandy': '#f1da7a', - 'electric pink': '#ff0490', - 'muted purple': '#805b87', - 'mid green': '#50a747', - 'greyish': '#a8a495', - 'neon yellow': '#cfff04', - 'banana': '#ffff7e', - 'carnation pink': '#ff7fa7', - 'tomato': '#ef4026', - 'sea': '#3c9992', - 'muddy brown': '#886806', - 'turquoise green': '#04f489', - 'buff': '#fef69e', - 'fawn': '#cfaf7b', - 'muted blue': '#3b719f', - 'pale rose': '#fdc1c5', - 'dark mint green': '#20c073', - 'amethyst': '#9b5fc0', - 'blue/green': '#0f9b8e', - 'chestnut': '#742802', - 'sick green': '#9db92c', - 'pea': '#a4bf20', - 'rusty orange': '#cd5909', - 'stone': '#ada587', - 'rose red': '#be013c', - 'pale aqua': '#b8ffeb', - 'deep orange': '#dc4d01', - 'earth': '#a2653e', - 'mossy green': '#638b27', - 'grassy green': '#419c03', - 'pale lime green': '#b1ff65', - 'light grey blue': '#9dbcd4', - 'pale grey': '#fdfdfe', - 'asparagus': '#77ab56', - 'blueberry': '#464196', - 'purple red': '#990147', - 'pale lime': '#befd73', - 'greenish teal': '#32bf84', - 'caramel': '#af6f09', - 'deep magenta': '#a0025c', - 'light peach': '#ffd8b1', - 'milk chocolate': '#7f4e1e', - 'ocher': '#bf9b0c', - 'off green': '#6ba353', - 'purply pink': '#f075e6', - 'lightblue': '#7bc8f6', - 'dusky blue': '#475f94', - 'golden': '#f5bf03', - 'light beige': '#fffeb6', - 'butter yellow': '#fffd74', - 'dusky purple': '#895b7b', - 'french blue': '#436bad', - 'ugly yellow': '#d0c101', - 'greeny yellow': '#c6f808', - 'orangish red': '#f43605', - 'shamrock green': '#02c14d', - 'orangish brown': '#b25f03', - 'tree green': '#2a7e19', - 'deep violet': '#490648', - 'gunmetal': '#536267', - 'blue/purple': '#5a06ef', - 'cherry': '#cf0234', - 'sandy brown': '#c4a661', - 'warm grey': '#978a84', - 'dark indigo': '#1f0954', - 'midnight': '#03012d', - 'bluey green': '#2bb179', - 'grey pink': '#c3909b', - 'soft purple': '#a66fb5', - 'blood': '#770001', - 'brown red': '#922b05', - 'medium grey': '#7d7f7c', - 'berry': '#990f4b', - 'poo': '#8f7303', - 'purpley pink': '#c83cb9', - 'light salmon': '#fea993', - 'snot': '#acbb0d', - 'easter purple': '#c071fe', - 'light yellow green': '#ccfd7f', - 'dark navy blue': '#00022e', - 'drab': '#828344', - 'light rose': '#ffc5cb', - 'rouge': '#ab1239', - 'purplish red': '#b0054b', - 'slime green': '#99cc04', - 'baby poop': '#937c00', - 'irish green': '#019529', - 'pink/purple': '#ef1de7', - 'dark navy': '#000435', - 'greeny blue': '#42b395', - 'light plum': '#9d5783', - 'pinkish grey': '#c8aca9', - 'dirty orange': '#c87606', - 'rust red': '#aa2704', - 'pale lilac': '#e4cbff', - 'orangey red': '#fa4224', - 'primary blue': '#0804f9', - 'kermit green': '#5cb200', - 'brownish purple': '#76424e', - 'murky green': '#6c7a0e', - 'wheat': '#fbdd7e', - 'very dark purple': '#2a0134', - 'bottle green': '#044a05', - 'watermelon': '#fd4659', - 'deep sky blue': '#0d75f8', - 'fire engine red': '#fe0002', - 'yellow ochre': '#cb9d06', - 'pumpkin orange': '#fb7d07', - 'pale olive': '#b9cc81', - 'light lilac': '#edc8ff', - 'lightish green': '#61e160', - 'carolina blue': '#8ab8fe', - 'mulberry': '#920a4e', - 'shocking pink': '#fe02a2', - 'auburn': '#9a3001', - 'bright lime green': '#65fe08', - 'celadon': '#befdb7', - 'pinkish brown': '#b17261', - 'poo brown': '#885f01', - 'bright sky blue': '#02ccfe', - 'celery': 
'#c1fd95', - 'dirt brown': '#836539', - 'strawberry': '#fb2943', - 'dark lime': '#84b701', - 'copper': '#b66325', - 'medium brown': '#7f5112', - 'muted green': '#5fa052', - "robin's egg": '#6dedfd', - 'bright aqua': '#0bf9ea', - 'bright lavender': '#c760ff', - 'ivory': '#ffffcb', - 'very light purple': '#f6cefc', - 'light navy': '#155084', - 'pink red': '#f5054f', - 'olive brown': '#645403', - 'poop brown': '#7a5901', - 'mustard green': '#a8b504', - 'ocean green': '#3d9973', - 'very dark blue': '#000133', - 'dusty green': '#76a973', - 'light navy blue': '#2e5a88', - 'minty green': '#0bf77d', - 'adobe': '#bd6c48', - 'barney': '#ac1db8', - 'jade green': '#2baf6a', - 'bright light blue': '#26f7fd', - 'light lime': '#aefd6c', - 'dark khaki': '#9b8f55', - 'orange yellow': '#ffad01', - 'ocre': '#c69c04', - 'maize': '#f4d054', - 'faded pink': '#de9dac', - 'british racing green': '#05480d', - 'sandstone': '#c9ae74', - 'mud brown': '#60460f', - 'light sea green': '#98f6b0', - 'robin egg blue': '#8af1fe', - 'aqua marine': '#2ee8bb', - 'dark sea green': '#11875d', - 'soft pink': '#fdb0c0', - 'orangey brown': '#b16002', - 'cherry red': '#f7022a', - 'burnt yellow': '#d5ab09', - 'brownish grey': '#86775f', - 'camel': '#c69f59', - 'purplish grey': '#7a687f', - 'marine': '#042e60', - 'greyish pink': '#c88d94', - 'pale turquoise': '#a5fbd5', - 'pastel yellow': '#fffe71', - 'bluey purple': '#6241c7', - 'canary yellow': '#fffe40', - 'faded red': '#d3494e', - 'sepia': '#985e2b', - 'coffee': '#a6814c', - 'bright magenta': '#ff08e8', - 'mocha': '#9d7651', - 'ecru': '#feffca', - 'purpleish': '#98568d', - 'cranberry': '#9e003a', - 'darkish green': '#287c37', - 'brown orange': '#b96902', - 'dusky rose': '#ba6873', - 'melon': '#ff7855', - 'sickly green': '#94b21c', - 'silver': '#c5c9c7', - 'purply blue': '#661aee', - 'purpleish blue': '#6140ef', - 'hospital green': '#9be5aa', - 'shit brown': '#7b5804', - 'mid blue': '#276ab3', - 'amber': '#feb308', - 'easter green': '#8cfd7e', - 'soft blue': '#6488ea', - 'cerulean blue': '#056eee', - 'golden brown': '#b27a01', - 'bright turquoise': '#0ffef9', - 'red pink': '#fa2a55', - 'red purple': '#820747', - 'greyish brown': '#7a6a4f', - 'vermillion': '#f4320c', - 'russet': '#a13905', - 'steel grey': '#6f828a', - 'lighter purple': '#a55af4', - 'bright violet': '#ad0afd', - 'prussian blue': '#004577', - 'slate green': '#658d6d', - 'dirty pink': '#ca7b80', - 'dark blue green': '#005249', - 'pine': '#2b5d34', - 'yellowy green': '#bff128', - 'dark gold': '#b59410', - 'bluish': '#2976bb', - 'darkish blue': '#014182', - 'dull red': '#bb3f3f', - 'pinky red': '#fc2647', - 'bronze': '#a87900', - 'pale teal': '#82cbb2', - 'military green': '#667c3e', - 'barbie pink': '#fe46a5', - 'bubblegum pink': '#fe83cc', - 'pea soup green': '#94a617', - 'dark mustard': '#a88905', - 'shit': '#7f5f00', - 'medium purple': '#9e43a2', - 'very dark green': '#062e03', - 'dirt': '#8a6e45', - 'dusky pink': '#cc7a8b', - 'red violet': '#9e0168', - 'lemon yellow': '#fdff38', - 'pistachio': '#c0fa8b', - 'dull yellow': '#eedc5b', - 'dark lime green': '#7ebd01', - 'denim blue': '#3b5b92', - 'teal blue': '#01889f', - 'lightish blue': '#3d7afd', - 'purpley blue': '#5f34e7', - 'light indigo': '#6d5acf', - 'swamp green': '#748500', - 'brown green': '#706c11', - 'dark maroon': '#3c0008', - 'hot purple': '#cb00f5', - 'dark forest green': '#002d04', - 'faded blue': '#658cbb', - 'drab green': '#749551', - 'light lime green': '#b9ff66', - 'snot green': '#9dc100', - 'yellowish': '#faee66', - 'light blue green': '#7efbb3', - 
'bordeaux': '#7b002c', - 'light mauve': '#c292a1', - 'ocean': '#017b92', - 'marigold': '#fcc006', - 'muddy green': '#657432', - 'dull orange': '#d8863b', - 'steel': '#738595', - 'electric purple': '#aa23ff', - 'fluorescent green': '#08ff08', - 'yellowish brown': '#9b7a01', - 'blush': '#f29e8e', - 'soft green': '#6fc276', - 'bright orange': '#ff5b00', - 'lemon': '#fdff52', - 'purple grey': '#866f85', - 'acid green': '#8ffe09', - 'pale lavender': '#eecffe', - 'violet blue': '#510ac9', - 'light forest green': '#4f9153', - 'burnt red': '#9f2305', - 'khaki green': '#728639', - 'cerise': '#de0c62', - 'faded purple': '#916e99', - 'apricot': '#ffb16d', - 'dark olive green': '#3c4d03', - 'grey brown': '#7f7053', - 'green grey': '#77926f', - 'true blue': '#010fcc', - 'pale violet': '#ceaefa', - 'periwinkle blue': '#8f99fb', - 'light sky blue': '#c6fcff', - 'blurple': '#5539cc', - 'green brown': '#544e03', - 'bluegreen': '#017a79', - 'bright teal': '#01f9c6', - 'brownish yellow': '#c9b003', - 'pea soup': '#929901', - 'forest': '#0b5509', - 'barney purple': '#a00498', - 'ultramarine': '#2000b1', - 'purplish': '#94568c', - 'puke yellow': '#c2be0e', - 'bluish grey': '#748b97', - 'dark periwinkle': '#665fd1', - 'dark lilac': '#9c6da5', - 'reddish': '#c44240', - 'light maroon': '#a24857', - 'dusty purple': '#825f87', - 'terra cotta': '#c9643b', - 'avocado': '#90b134', - 'marine blue': '#01386a', - 'teal green': '#25a36f', - 'slate grey': '#59656d', - 'lighter green': '#75fd63', - 'electric green': '#21fc0d', - 'dusty blue': '#5a86ad', - 'golden yellow': '#fec615', - 'bright yellow': '#fffd01', - 'light lavender': '#dfc5fe', - 'umber': '#b26400', - 'poop': '#7f5e00', - 'dark peach': '#de7e5d', - 'jungle green': '#048243', - 'eggshell': '#ffffd4', - 'denim': '#3b638c', - 'yellow brown': '#b79400', - 'dull purple': '#84597e', - 'chocolate brown': '#411900', - 'wine red': '#7b0323', - 'neon blue': '#04d9ff', - 'dirty green': '#667e2c', - 'light tan': '#fbeeac', - 'ice blue': '#d7fffe', - 'cadet blue': '#4e7496', - 'dark mauve': '#874c62', - 'very light blue': '#d5ffff', - 'grey purple': '#826d8c', - 'pastel pink': '#ffbacd', - 'very light green': '#d1ffbd', - 'dark sky blue': '#448ee4', - 'evergreen': '#05472a', - 'dull pink': '#d5869d', - 'aubergine': '#3d0734', - 'mahogany': '#4a0100', - 'reddish orange': '#f8481c', - 'deep green': '#02590f', - 'vomit green': '#89a203', - 'purple pink': '#e03fd8', - 'dusty pink': '#d58a94', - 'faded green': '#7bb274', - 'camo green': '#526525', - 'pinky purple': '#c94cbe', - 'pink purple': '#db4bda', - 'brownish red': '#9e3623', - 'dark rose': '#b5485d', - 'mud': '#735c12', - 'brownish': '#9c6d57', - 'emerald green': '#028f1e', - 'pale brown': '#b1916e', - 'dull blue': '#49759c', - 'burnt umber': '#a0450e', - 'medium green': '#39ad48', - 'clay': '#b66a50', - 'light aqua': '#8cffdb', - 'light olive green': '#a4be5c', - 'brownish orange': '#cb7723', - 'dark aqua': '#05696b', - 'purplish pink': '#ce5dae', - 'dark salmon': '#c85a53', - 'greenish grey': '#96ae8d', - 'jade': '#1fa774', - 'ugly green': '#7a9703', - 'dark beige': '#ac9362', - 'emerald': '#01a049', - 'pale red': '#d9544d', - 'light magenta': '#fa5ff7', - 'sky': '#82cafc', - 'light cyan': '#acfffc', - 'yellow orange': '#fcb001', - 'reddish purple': '#910951', - 'reddish pink': '#fe2c54', - 'orchid': '#c875c4', - 'dirty yellow': '#cdc50a', - 'orange red': '#fd411e', - 'deep red': '#9a0200', - 'orange brown': '#be6400', - 'cobalt blue': '#030aa7', - 'neon pink': '#fe019a', - 'rose pink': '#f7879a', - 'greyish purple': 
'#887191', - 'raspberry': '#b00149', - 'aqua green': '#12e193', - 'salmon pink': '#fe7b7c', - 'tangerine': '#ff9408', - 'brownish green': '#6a6e09', - 'red brown': '#8b2e16', - 'greenish brown': '#696112', - 'pumpkin': '#e17701', - 'pine green': '#0a481e', - 'charcoal': '#343837', - 'baby pink': '#ffb7ce', - 'cornflower': '#6a79f7', - 'blue violet': '#5d06e9', - 'chocolate': '#3d1c02', - 'greyish green': '#82a67d', - 'scarlet': '#be0119', - 'green yellow': '#c9ff27', - 'dark olive': '#373e02', - 'sienna': '#a9561e', - 'pastel purple': '#caa0ff', - 'terracotta': '#ca6641', - 'aqua blue': '#02d8e9', - 'sage green': '#88b378', - 'blood red': '#980002', - 'deep pink': '#cb0162', - 'grass': '#5cac2d', - 'moss': '#769958', - 'pastel blue': '#a2bffe', - 'bluish green': '#10a674', - 'green blue': '#06b48b', - 'dark tan': '#af884a', - 'greenish blue': '#0b8b87', - 'pale orange': '#ffa756', - 'vomit': '#a2a415', - 'forrest green': '#154406', - 'dark lavender': '#856798', - 'dark violet': '#34013f', - 'purple blue': '#632de9', - 'dark cyan': '#0a888a', - 'olive drab': '#6f7632', - 'pinkish': '#d46a7e', - 'cobalt': '#1e488f', - 'neon purple': '#bc13fe', - 'light turquoise': '#7ef4cc', - 'apple green': '#76cd26', - 'dull green': '#74a662', - 'wine': '#80013f', - 'powder blue': '#b1d1fc', - 'off white': '#ffffe4', - 'electric blue': '#0652ff', - 'dark turquoise': '#045c5a', - 'blue purple': '#5729ce', - 'azure': '#069af3', - 'bright red': '#ff000d', - 'pinkish red': '#f10c45', - 'cornflower blue': '#5170d7', - 'light olive': '#acbf69', - 'grape': '#6c3461', - 'greyish blue': '#5e819d', - 'purplish blue': '#601ef9', - 'yellowish green': '#b0dd16', - 'greenish yellow': '#cdfd02', - 'medium blue': '#2c6fbb', - 'dusty rose': '#c0737a', - 'light violet': '#d6b4fc', - 'midnight blue': '#020035', - 'bluish purple': '#703be7', - 'red orange': '#fd3c06', - 'dark magenta': '#960056', - 'greenish': '#40a368', - 'ocean blue': '#03719c', - 'coral': '#fc5a50', - 'cream': '#ffffc2', - 'reddish brown': '#7f2b0a', - 'burnt sienna': '#b04e0f', - 'brick': '#a03623', - 'sage': '#87ae73', - 'grey green': '#789b73', - 'white': '#ffffff', - "robin's egg blue": '#98eff9', - 'moss green': '#658b38', - 'steel blue': '#5a7d9a', - 'eggplant': '#380835', - 'light yellow': '#fffe7a', - 'leaf green': '#5ca904', - 'light grey': '#d8dcd6', - 'puke': '#a5a502', - 'pinkish purple': '#d648d7', - 'sea blue': '#047495', - 'pale purple': '#b790d4', - 'slate blue': '#5b7c99', - 'blue grey': '#607c8e', - 'hunter green': '#0b4008', - 'fuchsia': '#ed0dd9', - 'crimson': '#8c000f', - 'pale yellow': '#ffff84', - 'ochre': '#bf9005', - 'mustard yellow': '#d2bd0a', - 'light red': '#ff474c', - 'cerulean': '#0485d1', - 'pale pink': '#ffcfdc', - 'deep blue': '#040273', - 'rust': '#a83c09', - 'light teal': '#90e4c1', - 'slate': '#516572', - 'goldenrod': '#fac205', - 'dark yellow': '#d5b60a', - 'dark grey': '#363737', - 'army green': '#4b5d16', - 'grey blue': '#6b8ba4', - 'seafoam': '#80f9ad', - 'puce': '#a57e52', - 'spring green': '#a9f971', - 'dark orange': '#c65102', - 'sand': '#e2ca76', - 'pastel green': '#b0ff9d', - 'mint': '#9ffeb0', - 'light orange': '#fdaa48', - 'bright pink': '#fe01b1', - 'chartreuse': '#c1f80a', - 'deep purple': '#36013f', - 'dark brown': '#341c02', - 'taupe': '#b9a281', - 'pea green': '#8eab12', - 'puke green': '#9aae07', - 'kelly green': '#02ab2e', - 'seafoam green': '#7af9ab', - 'blue green': '#137e6d', - 'khaki': '#aaa662', - 'burgundy': '#610023', - 'dark teal': '#014d4e', - 'brick red': '#8f1402', - 'royal purple': 
'#4b006e', - 'plum': '#580f41', - 'mint green': '#8fff9f', - 'gold': '#dbb40c', - 'baby blue': '#a2cffe', - 'yellow green': '#c0fb2d', - 'bright purple': '#be03fd', - 'dark red': '#840000', - 'pale blue': '#d0fefe', - 'grass green': '#3f9b0b', - 'navy': '#01153e', - 'aquamarine': '#04d8b2', - 'burnt orange': '#c04e01', - 'neon green': '#0cff0c', - 'bright blue': '#0165fc', - 'rose': '#cf6275', - 'light pink': '#ffd1df', - 'mustard': '#ceb301', - 'indigo': '#380282', - 'lime': '#aaff32', - 'sea green': '#53fca1', - 'periwinkle': '#8e82fe', - 'dark pink': '#cb416b', - 'olive green': '#677a04', - 'peach': '#ffb07c', - 'pale green': '#c7fdb5', - 'light brown': '#ad8150', - 'hot pink': '#ff028d', - 'black': '#000000', - 'lilac': '#cea2fd', - 'navy blue': '#001146', - 'royal blue': '#0504aa', - 'beige': '#e6daa6', - 'salmon': '#ff796c', - 'olive': '#6e750e', - 'maroon': '#650021', - 'bright green': '#01ff07', - 'dark purple': '#35063e', - 'mauve': '#ae7181', - 'forest green': '#06470c', - 'aqua': '#13eac9', - 'cyan': '#00ffff', - 'tan': '#d1b26f', - 'dark blue': '#00035b', - 'lavender': '#c79fef', - 'turquoise': '#06c2ac', - 'dark green': '#033500', - 'violet': '#9a0eea', - 'light purple': '#bf77f6', - 'lime green': '#89fe05', - 'grey': '#929591', - 'sky blue': '#75bbfd', - 'yellow': '#ffff14', - 'magenta': '#c20078', - 'light green': '#96f97b', - 'orange': '#f97306', - 'teal': '#029386', - 'light blue': '#95d0fc', - 'red': '#e50000', - 'brown': '#653700', - 'pink': '#ff81c0', - 'blue': '#0343df', - 'green': '#15b01a', - 'purple': '#7e1e9c'} - -# Normalize name to "xkcd:" to avoid name collisions. -XKCD_COLORS = {'xkcd:' + name: value for name, value in XKCD_COLORS.items()} - - -# https://drafts.csswg.org/css-color-4/#named-colors -CSS4_COLORS = { - 'aliceblue': '#F0F8FF', - 'antiquewhite': '#FAEBD7', - 'aqua': '#00FFFF', - 'aquamarine': '#7FFFD4', - 'azure': '#F0FFFF', - 'beige': '#F5F5DC', - 'bisque': '#FFE4C4', - 'black': '#000000', - 'blanchedalmond': '#FFEBCD', - 'blue': '#0000FF', - 'blueviolet': '#8A2BE2', - 'brown': '#A52A2A', - 'burlywood': '#DEB887', - 'cadetblue': '#5F9EA0', - 'chartreuse': '#7FFF00', - 'chocolate': '#D2691E', - 'coral': '#FF7F50', - 'cornflowerblue': '#6495ED', - 'cornsilk': '#FFF8DC', - 'crimson': '#DC143C', - 'cyan': '#00FFFF', - 'darkblue': '#00008B', - 'darkcyan': '#008B8B', - 'darkgoldenrod': '#B8860B', - 'darkgray': '#A9A9A9', - 'darkgreen': '#006400', - 'darkgrey': '#A9A9A9', - 'darkkhaki': '#BDB76B', - 'darkmagenta': '#8B008B', - 'darkolivegreen': '#556B2F', - 'darkorange': '#FF8C00', - 'darkorchid': '#9932CC', - 'darkred': '#8B0000', - 'darksalmon': '#E9967A', - 'darkseagreen': '#8FBC8F', - 'darkslateblue': '#483D8B', - 'darkslategray': '#2F4F4F', - 'darkslategrey': '#2F4F4F', - 'darkturquoise': '#00CED1', - 'darkviolet': '#9400D3', - 'deeppink': '#FF1493', - 'deepskyblue': '#00BFFF', - 'dimgray': '#696969', - 'dimgrey': '#696969', - 'dodgerblue': '#1E90FF', - 'firebrick': '#B22222', - 'floralwhite': '#FFFAF0', - 'forestgreen': '#228B22', - 'fuchsia': '#FF00FF', - 'gainsboro': '#DCDCDC', - 'ghostwhite': '#F8F8FF', - 'gold': '#FFD700', - 'goldenrod': '#DAA520', - 'gray': '#808080', - 'green': '#008000', - 'greenyellow': '#ADFF2F', - 'grey': '#808080', - 'honeydew': '#F0FFF0', - 'hotpink': '#FF69B4', - 'indianred': '#CD5C5C', - 'indigo': '#4B0082', - 'ivory': '#FFFFF0', - 'khaki': '#F0E68C', - 'lavender': '#E6E6FA', - 'lavenderblush': '#FFF0F5', - 'lawngreen': '#7CFC00', - 'lemonchiffon': '#FFFACD', - 'lightblue': '#ADD8E6', - 'lightcoral': '#F08080', - 
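Both name tables above are exposed as plain dicts on `matplotlib.colors`. A minimal lookup sketch (assuming only a standard matplotlib install) showing why the `xkcd:` prefix is applied: the survey's 'blue' and the CSS4 'blue' are different colors.

```python
# Minimal lookup sketch: the survey names live under an "xkcd:" prefix,
# so they cannot shadow the CSS4 names even when the base name collides.
import matplotlib.colors as mcolors

print(mcolors.XKCD_COLORS['xkcd:blue'])  # '#0343df' (survey blue)
print(mcolors.CSS4_COLORS['blue'])       # '#0000FF' (CSS4 blue)
print(mcolors.to_rgba('xkcd:blue'))      # (0.0117..., 0.2627..., 0.8745..., 1.0)
```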
'lightcyan': '#E0FFFF', - 'lightgoldenrodyellow': '#FAFAD2', - 'lightgray': '#D3D3D3', - 'lightgreen': '#90EE90', - 'lightgrey': '#D3D3D3', - 'lightpink': '#FFB6C1', - 'lightsalmon': '#FFA07A', - 'lightseagreen': '#20B2AA', - 'lightskyblue': '#87CEFA', - 'lightslategray': '#778899', - 'lightslategrey': '#778899', - 'lightsteelblue': '#B0C4DE', - 'lightyellow': '#FFFFE0', - 'lime': '#00FF00', - 'limegreen': '#32CD32', - 'linen': '#FAF0E6', - 'magenta': '#FF00FF', - 'maroon': '#800000', - 'mediumaquamarine': '#66CDAA', - 'mediumblue': '#0000CD', - 'mediumorchid': '#BA55D3', - 'mediumpurple': '#9370DB', - 'mediumseagreen': '#3CB371', - 'mediumslateblue': '#7B68EE', - 'mediumspringgreen': '#00FA9A', - 'mediumturquoise': '#48D1CC', - 'mediumvioletred': '#C71585', - 'midnightblue': '#191970', - 'mintcream': '#F5FFFA', - 'mistyrose': '#FFE4E1', - 'moccasin': '#FFE4B5', - 'navajowhite': '#FFDEAD', - 'navy': '#000080', - 'oldlace': '#FDF5E6', - 'olive': '#808000', - 'olivedrab': '#6B8E23', - 'orange': '#FFA500', - 'orangered': '#FF4500', - 'orchid': '#DA70D6', - 'palegoldenrod': '#EEE8AA', - 'palegreen': '#98FB98', - 'paleturquoise': '#AFEEEE', - 'palevioletred': '#DB7093', - 'papayawhip': '#FFEFD5', - 'peachpuff': '#FFDAB9', - 'peru': '#CD853F', - 'pink': '#FFC0CB', - 'plum': '#DDA0DD', - 'powderblue': '#B0E0E6', - 'purple': '#800080', - 'rebeccapurple': '#663399', - 'red': '#FF0000', - 'rosybrown': '#BC8F8F', - 'royalblue': '#4169E1', - 'saddlebrown': '#8B4513', - 'salmon': '#FA8072', - 'sandybrown': '#F4A460', - 'seagreen': '#2E8B57', - 'seashell': '#FFF5EE', - 'sienna': '#A0522D', - 'silver': '#C0C0C0', - 'skyblue': '#87CEEB', - 'slateblue': '#6A5ACD', - 'slategray': '#708090', - 'slategrey': '#708090', - 'snow': '#FFFAFA', - 'springgreen': '#00FF7F', - 'steelblue': '#4682B4', - 'tan': '#D2B48C', - 'teal': '#008080', - 'thistle': '#D8BFD8', - 'tomato': '#FF6347', - 'turquoise': '#40E0D0', - 'violet': '#EE82EE', - 'wheat': '#F5DEB3', - 'white': '#FFFFFF', - 'whitesmoke': '#F5F5F5', - 'yellow': '#FFFF00', - 'yellowgreen': '#9ACD32'} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/patches.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/patches.py deleted file mode 100644 index 2b4e0dc6e6a919f9b422eed1329702c711889b93..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/patches.py +++ /dev/null @@ -1,4607 +0,0 @@ -r""" -Patches are `.Artist`\s with a face color and an edge color. -""" - -import functools -import inspect -import math -from numbers import Number -import textwrap -from types import SimpleNamespace -from collections import namedtuple -from matplotlib.transforms import Affine2D - -import numpy as np - -import matplotlib as mpl -from . import (_api, artist, cbook, colors, _docstring, hatch as mhatch, - lines as mlines, transforms) -from .bezier import ( - NonIntersectingPathException, get_cos_sin, get_intersection, - get_parallels, inside_circle, make_wedged_bezier2, - split_bezier_intersecting_with_closedpath, split_path_inout) -from .path import Path -from ._enums import JoinStyle, CapStyle - - -@_docstring.interpd -@_api.define_aliases({ - "antialiased": ["aa"], - "edgecolor": ["ec"], - "facecolor": ["fc"], - "linestyle": ["ls"], - "linewidth": ["lw"], -}) -class Patch(artist.Artist): - """ - A patch is a 2D artist with a face color and an edge color. 
- - If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased* - are *None*, they default to their rc params setting. - """ - zorder = 1 - - # Whether to draw an edge by default. Set on a - # subclass-by-subclass basis. - _edge_default = False - - @_api.make_keyword_only("3.6", name="edgecolor") - def __init__(self, - edgecolor=None, - facecolor=None, - color=None, - linewidth=None, - linestyle=None, - antialiased=None, - hatch=None, - fill=True, - capstyle=None, - joinstyle=None, - **kwargs): - """ - The following kwarg properties are supported - - %(Patch:kwdoc)s - """ - super().__init__() - - if linestyle is None: - linestyle = "solid" - if capstyle is None: - capstyle = CapStyle.butt - if joinstyle is None: - joinstyle = JoinStyle.miter - - self._hatch_color = colors.to_rgba(mpl.rcParams['hatch.color']) - self._fill = True # needed for set_facecolor call - if color is not None: - if edgecolor is not None or facecolor is not None: - _api.warn_external( - "Setting the 'color' property will override " - "the edgecolor or facecolor properties.") - self.set_color(color) - else: - self.set_edgecolor(edgecolor) - self.set_facecolor(facecolor) - - self._linewidth = 0 - self._unscaled_dash_pattern = (0, None) # offset, dash - self._dash_pattern = (0, None) # offset, dash (scaled by linewidth) - - self.set_fill(fill) - self.set_linestyle(linestyle) - self.set_linewidth(linewidth) - self.set_antialiased(antialiased) - self.set_hatch(hatch) - self.set_capstyle(capstyle) - self.set_joinstyle(joinstyle) - - if len(kwargs): - self._internal_update(kwargs) - - def get_verts(self): - """ - Return a copy of the vertices used in this patch. - - If the patch contains Bézier curves, the curves will be interpolated by - line segments. To access the curves as curves, use `get_path`. - """ - trans = self.get_transform() - path = self.get_path() - polygons = path.to_polygons(trans) - if len(polygons): - return polygons[0] - return [] - - def _process_radius(self, radius): - if radius is not None: - return radius - if isinstance(self._picker, Number): - _radius = self._picker - else: - if self.get_edgecolor()[3] == 0: - _radius = 0 - else: - _radius = self.get_linewidth() - return _radius - - def contains(self, mouseevent, radius=None): - """ - Test whether the mouse event occurred in the patch. - - Returns - ------- - (bool, empty dict) - """ - inside, info = self._default_contains(mouseevent) - if inside is not None: - return inside, info - radius = self._process_radius(radius) - codes = self.get_path().codes - if codes is not None: - vertices = self.get_path().vertices - # if the current path is concatenated by multiple sub paths. - # get the indexes of the starting code(MOVETO) of all sub paths - idxs, = np.where(codes == Path.MOVETO) - # Don't split before the first MOVETO. - idxs = idxs[1:] - subpaths = map( - Path, np.split(vertices, idxs), np.split(codes, idxs)) - else: - subpaths = [self.get_path()] - inside = any( - subpath.contains_point( - (mouseevent.x, mouseevent.y), self.get_transform(), radius) - for subpath in subpaths) - return inside, {} - - def contains_point(self, point, radius=None): - """ - Return whether the given point is inside the patch. - - Parameters - ---------- - point : (float, float) - The point (x, y) to check, in target coordinates of - ``self.get_transform()``. These are display coordinates for patches - that are added to a figure or axes. - radius : float, optional - Additional margin on the patch in target coordinates of - ``self.get_transform()``. 
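A minimal sketch of the constructor's color precedence implemented above, assuming a standard matplotlib install: *color* sets both components and warns when combined with an explicit *edgecolor* or *facecolor*.

```python
# Sketch of the precedence in Patch.__init__ above: color= sets both the
# face and the edge, and warns if either was also given explicitly.
from matplotlib.patches import Rectangle

both = Rectangle((0, 0), 1, 1, color='xkcd:teal')      # face == edge == teal
split = Rectangle((0, 0), 1, 1, facecolor='xkcd:teal', # set independently
                  edgecolor='black')
# Rectangle((0, 0), 1, 1, color='red', facecolor='blue') would emit the
# "Setting the 'color' property will override ..." warning seen above.
```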
See `.Path.contains_point` for further - details. - - Returns - ------- - bool - - Notes - ----- - The proper use of this method depends on the transform of the patch. - Isolated patches do not have a transform. In this case, the patch - creation coordinates and the point coordinates match. The following - example checks that the center of a circle is within the circle - - >>> center = 0, 0 - >>> c = Circle(center, radius=1) - >>> c.contains_point(center) - True - - The convention of checking against the transformed patch stems from - the fact that this method is predominantly used to check if display - coordinates (e.g. from mouse events) are within the patch. If you want - to do the above check with data coordinates, you have to properly - transform them first: - - >>> center = 0, 0 - >>> c = Circle(center, radius=1) - >>> plt.gca().add_patch(c) - >>> transformed_center = c.get_transform().transform(center) - >>> c.contains_point(transformed_center) - True - - """ - radius = self._process_radius(radius) - return self.get_path().contains_point(point, - self.get_transform(), - radius) - - def contains_points(self, points, radius=None): - """ - Return whether the given points are inside the patch. - - Parameters - ---------- - points : (N, 2) array - The points to check, in target coordinates of - ``self.get_transform()``. These are display coordinates for patches - that are added to a figure or axes. Columns contain x and y values. - radius : float, optional - Additional margin on the patch in target coordinates of - ``self.get_transform()``. See `.Path.contains_point` for further - details. - - Returns - ------- - length-N bool array - - Notes - ----- - The proper use of this method depends on the transform of the patch. - See the notes on `.Patch.contains_point`. - """ - radius = self._process_radius(radius) - return self.get_path().contains_points(points, - self.get_transform(), - radius) - - def update_from(self, other): - # docstring inherited. - super().update_from(other) - # For some properties we don't need or don't want to go through the - # getters/setters, so we just copy them directly. - self._edgecolor = other._edgecolor - self._facecolor = other._facecolor - self._original_edgecolor = other._original_edgecolor - self._original_facecolor = other._original_facecolor - self._fill = other._fill - self._hatch = other._hatch - self._hatch_color = other._hatch_color - self._unscaled_dash_pattern = other._unscaled_dash_pattern - self.set_linewidth(other._linewidth) # also sets scaled dashes - self.set_transform(other.get_data_transform()) - # If the transform of other needs further initialization, then it will - # be the case for this artist too. - self._transformSet = other.is_transform_set() - - def get_extents(self): - """ - Return the `Patch`'s axis-aligned extents as a `~.transforms.Bbox`. - """ - return self.get_path().get_extents(self.get_transform()) - - def get_transform(self): - """Return the `~.transforms.Transform` applied to the `Patch`.""" - return self.get_patch_transform() + artist.Artist.get_transform(self) - - def get_data_transform(self): - """ - Return the `~.transforms.Transform` mapping data coordinates to - physical coordinates. - """ - return artist.Artist.get_transform(self) - - def get_patch_transform(self): - """ - Return the `~.transforms.Transform` instance mapping patch coordinates - to data coordinates. 
- - For example, one may define a patch of a circle which represents a - radius of 5 by providing coordinates for a unit circle, and a - transform which scales the coordinates (the patch coordinate) by 5. - """ - return transforms.IdentityTransform() - - def get_antialiased(self): - """Return whether antialiasing is used for drawing.""" - return self._antialiased - - def get_edgecolor(self): - """Return the edge color.""" - return self._edgecolor - - def get_facecolor(self): - """Return the face color.""" - return self._facecolor - - def get_linewidth(self): - """Return the line width in points.""" - return self._linewidth - - def get_linestyle(self): - """Return the linestyle.""" - return self._linestyle - - def set_antialiased(self, aa): - """ - Set whether to use antialiased rendering. - - Parameters - ---------- - aa : bool or None - """ - if aa is None: - aa = mpl.rcParams['patch.antialiased'] - self._antialiased = aa - self.stale = True - - def _set_edgecolor(self, color): - set_hatch_color = True - if color is None: - if (mpl.rcParams['patch.force_edgecolor'] or - not self._fill or self._edge_default): - color = mpl.rcParams['patch.edgecolor'] - else: - color = 'none' - set_hatch_color = False - - self._edgecolor = colors.to_rgba(color, self._alpha) - if set_hatch_color: - self._hatch_color = self._edgecolor - self.stale = True - - def set_edgecolor(self, color): - """ - Set the patch edge color. - - Parameters - ---------- - color : color or None - """ - self._original_edgecolor = color - self._set_edgecolor(color) - - def _set_facecolor(self, color): - if color is None: - color = mpl.rcParams['patch.facecolor'] - alpha = self._alpha if self._fill else 0 - self._facecolor = colors.to_rgba(color, alpha) - self.stale = True - - def set_facecolor(self, color): - """ - Set the patch face color. - - Parameters - ---------- - color : color or None - """ - self._original_facecolor = color - self._set_facecolor(color) - - def set_color(self, c): - """ - Set both the edgecolor and the facecolor. - - Parameters - ---------- - c : color - - See Also - -------- - Patch.set_facecolor, Patch.set_edgecolor - For setting the edge or face color individually. - """ - self.set_facecolor(c) - self.set_edgecolor(c) - - def set_alpha(self, alpha): - # docstring inherited - super().set_alpha(alpha) - self._set_facecolor(self._original_facecolor) - self._set_edgecolor(self._original_edgecolor) - # stale is already True - - def set_linewidth(self, w): - """ - Set the patch linewidth in points. - - Parameters - ---------- - w : float or None - """ - if w is None: - w = mpl.rcParams['patch.linewidth'] - self._linewidth = float(w) - self._dash_pattern = mlines._scale_dashes( - *self._unscaled_dash_pattern, w) - self.stale = True - - def set_linestyle(self, ls): - """ - Set the patch linestyle. - - ========================================== ================= - linestyle description - ========================================== ================= - ``'-'`` or ``'solid'`` solid line - ``'--'`` or ``'dashed'`` dashed line - ``'-.'`` or ``'dashdot'`` dash-dotted line - ``':'`` or ``'dotted'`` dotted line - ``'none'``, ``'None'``, ``' '``, or ``''`` draw nothing - ========================================== ================= - - Alternatively a dash tuple of the following form can be provided:: - - (offset, onoffseq) - - where ``onoffseq`` is an even length tuple of on and off ink in points. - - Parameters - ---------- - ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} - The line style. 
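A short sketch of the two `set_linestyle` spellings just documented, assuming a standard matplotlib install:

```python
# Sketch of the two linestyle spellings from the table above: a named
# style, or an (offset, on-off-seq) dash tuple in points.
from matplotlib.patches import Rectangle

named = Rectangle((0, 0), 1, 1, fill=False, linestyle='--')
tuned = Rectangle((0, 0), 1, 1, fill=False,
                  linestyle=(0, (6, 2)))  # 6 pt of ink, 2 pt gap, no offset
```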
- """ - if ls is None: - ls = "solid" - if ls in [' ', '', 'none']: - ls = 'None' - self._linestyle = ls - self._unscaled_dash_pattern = mlines._get_dash_pattern(ls) - self._dash_pattern = mlines._scale_dashes( - *self._unscaled_dash_pattern, self._linewidth) - self.stale = True - - def set_fill(self, b): - """ - Set whether to fill the patch. - - Parameters - ---------- - b : bool - """ - self._fill = bool(b) - self._set_facecolor(self._original_facecolor) - self._set_edgecolor(self._original_edgecolor) - self.stale = True - - def get_fill(self): - """Return whether the patch is filled.""" - return self._fill - - # Make fill a property so as to preserve the long-standing - # but somewhat inconsistent behavior in which fill was an - # attribute. - fill = property(get_fill, set_fill) - - @_docstring.interpd - def set_capstyle(self, s): - """ - Set the `.CapStyle`. - - The default capstyle is 'round' for `.FancyArrowPatch` and 'butt' for - all other patches. - - Parameters - ---------- - s : `.CapStyle` or %(CapStyle)s - """ - cs = CapStyle(s) - self._capstyle = cs - self.stale = True - - def get_capstyle(self): - """Return the capstyle.""" - return self._capstyle.name - - @_docstring.interpd - def set_joinstyle(self, s): - """ - Set the `.JoinStyle`. - - The default joinstyle is 'round' for `.FancyArrowPatch` and 'miter' for - all other patches. - - Parameters - ---------- - s : `.JoinStyle` or %(JoinStyle)s - """ - js = JoinStyle(s) - self._joinstyle = js - self.stale = True - - def get_joinstyle(self): - """Return the joinstyle.""" - return self._joinstyle.name - - def set_hatch(self, hatch): - r""" - Set the hatching pattern. - - *hatch* can be one of:: - - / - diagonal hatching - \ - back diagonal - | - vertical - - - horizontal - + - crossed - x - crossed diagonal - o - small circle - O - large circle - . - dots - * - stars - - Letters can be combined, in which case all the specified - hatchings are done. If same letter repeats, it increases the - density of hatching of that pattern. - - Hatching is supported in the PostScript, PDF, SVG and Agg - backends only. - - Parameters - ---------- - hatch : {'/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'} - """ - # Use validate_hatch(list) after deprecation. - mhatch._validate_hatch_pattern(hatch) - self._hatch = hatch - self.stale = True - - def get_hatch(self): - """Return the hatching pattern.""" - return self._hatch - - def _draw_paths_with_artist_properties( - self, renderer, draw_path_args_list): - """ - ``draw()`` helper factored out for sharing with `FancyArrowPatch`. - - Configure *renderer* and the associated graphics context *gc* - from the artist properties, then repeatedly call - ``renderer.draw_path(gc, *draw_path_args)`` for each tuple - *draw_path_args* in *draw_path_args_list*. 
- """ - - renderer.open_group('patch', self.get_gid()) - gc = renderer.new_gc() - - gc.set_foreground(self._edgecolor, isRGBA=True) - - lw = self._linewidth - if self._edgecolor[3] == 0 or self._linestyle == 'None': - lw = 0 - gc.set_linewidth(lw) - gc.set_dashes(*self._dash_pattern) - gc.set_capstyle(self._capstyle) - gc.set_joinstyle(self._joinstyle) - - gc.set_antialiased(self._antialiased) - self._set_gc_clip(gc) - gc.set_url(self._url) - gc.set_snap(self.get_snap()) - - gc.set_alpha(self._alpha) - - if self._hatch: - gc.set_hatch(self._hatch) - gc.set_hatch_color(self._hatch_color) - - if self.get_sketch_params() is not None: - gc.set_sketch_params(*self.get_sketch_params()) - - if self.get_path_effects(): - from matplotlib.patheffects import PathEffectRenderer - renderer = PathEffectRenderer(self.get_path_effects(), renderer) - - for draw_path_args in draw_path_args_list: - renderer.draw_path(gc, *draw_path_args) - - gc.restore() - renderer.close_group('patch') - self.stale = False - - @artist.allow_rasterization - def draw(self, renderer): - # docstring inherited - if not self.get_visible(): - return - path = self.get_path() - transform = self.get_transform() - tpath = transform.transform_path_non_affine(path) - affine = transform.get_affine() - self._draw_paths_with_artist_properties( - renderer, - [(tpath, affine, - # Work around a bug in the PDF and SVG renderers, which - # do not draw the hatches if the facecolor is fully - # transparent, but do if it is None. - self._facecolor if self._facecolor[3] else None)]) - - def get_path(self): - """Return the path of this patch.""" - raise NotImplementedError('Derived must override') - - def get_window_extent(self, renderer=None): - return self.get_path().get_extents(self.get_transform()) - - def _convert_xy_units(self, xy): - """Convert x and y units for a tuple (x, y).""" - x = self.convert_xunits(xy[0]) - y = self.convert_yunits(xy[1]) - return x, y - - -class Shadow(Patch): - def __str__(self): - return f"Shadow({self.patch})" - - @_docstring.dedent_interpd - def __init__(self, patch, ox, oy, **kwargs): - """ - Create a shadow of the given *patch*. - - By default, the shadow will have the same face color as the *patch*, - but darkened. - - Parameters - ---------- - patch : `.Patch` - The patch to create the shadow for. - ox, oy : float - The shift of the shadow in data coordinates, scaled by a factor - of dpi/72. - **kwargs - Properties of the shadow patch. Supported keys are: - - %(Patch:kwdoc)s - """ - super().__init__() - self.patch = patch - self._ox, self._oy = ox, oy - self._shadow_transform = transforms.Affine2D() - - self.update_from(self.patch) - color = .3 * np.asarray(colors.to_rgb(self.patch.get_facecolor())) - self.update({'facecolor': color, 'edgecolor': color, 'alpha': 0.5, - # Place shadow patch directly behind the inherited patch. - 'zorder': np.nextafter(self.patch.zorder, -np.inf), - **kwargs}) - - def _update_transform(self, renderer): - ox = renderer.points_to_pixels(self._ox) - oy = renderer.points_to_pixels(self._oy) - self._shadow_transform.clear().translate(ox, oy) - - def get_path(self): - return self.patch.get_path() - - def get_patch_transform(self): - return self.patch.get_patch_transform() + self._shadow_transform - - def draw(self, renderer): - self._update_transform(renderer) - super().draw(renderer) - - -class Rectangle(Patch): - """ - A rectangle defined via an anchor point *xy* and its *width* and *height*. 
- - The rectangle extends from ``xy[0]`` to ``xy[0] + width`` in x-direction - and from ``xy[1]`` to ``xy[1] + height`` in y-direction. :: - - : +------------------+ - : | | - : height | - : | | - : (xy)---- width -----+ - - One may picture *xy* as the bottom left corner, but which corner *xy* is - actually depends on the direction of the axis and the sign of *width* - and *height*; e.g. *xy* would be the bottom right corner if the x-axis - was inverted or if *width* was negative. - """ - - def __str__(self): - pars = self._x0, self._y0, self._width, self._height, self.angle - fmt = "Rectangle(xy=(%g, %g), width=%g, height=%g, angle=%g)" - return fmt % pars - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="angle") - def __init__(self, xy, width, height, angle=0.0, *, - rotation_point='xy', **kwargs): - """ - Parameters - ---------- - xy : (float, float) - The anchor point. - width : float - Rectangle width. - height : float - Rectangle height. - angle : float, default: 0 - Rotation in degrees anti-clockwise about the rotation point. - rotation_point : {'xy', 'center', (number, number)}, default: 'xy' - If ``'xy'``, rotate around the anchor point. If ``'center'`` rotate - around the center. If 2-tuple of number, rotate around this - coordinate. - - Other Parameters - ---------------- - **kwargs : `.Patch` properties - %(Patch:kwdoc)s - """ - super().__init__(**kwargs) - self._x0 = xy[0] - self._y0 = xy[1] - self._width = width - self._height = height - self.angle = float(angle) - self.rotation_point = rotation_point - # Required for RectangleSelector with axes aspect ratio != 1 - # The patch is defined in data coordinates and when changing the - # selector with square modifier and not in data coordinates, we need - # to correct for the aspect ratio difference between the data and - # display coordinate systems. Its value is typically provide by - # Axes._get_aspect_ratio() - self._aspect_ratio_correction = 1.0 - self._convert_units() # Validate the inputs. - - def get_path(self): - """Return the vertices of the rectangle.""" - return Path.unit_rectangle() - - def _convert_units(self): - """Convert bounds of the rectangle.""" - x0 = self.convert_xunits(self._x0) - y0 = self.convert_yunits(self._y0) - x1 = self.convert_xunits(self._x0 + self._width) - y1 = self.convert_yunits(self._y0 + self._height) - return x0, y0, x1, y1 - - def get_patch_transform(self): - # Note: This cannot be called until after this has been added to - # an Axes, otherwise unit conversion will fail. This makes it very - # important to call the accessor method and not directly access the - # transformation member variable. - bbox = self.get_bbox() - if self.rotation_point == 'center': - width, height = bbox.x1 - bbox.x0, bbox.y1 - bbox.y0 - rotation_point = bbox.x0 + width / 2., bbox.y0 + height / 2. 
- elif self.rotation_point == 'xy': - rotation_point = bbox.x0, bbox.y0 - else: - rotation_point = self.rotation_point - return transforms.BboxTransformTo(bbox) \ - + transforms.Affine2D() \ - .translate(-rotation_point[0], -rotation_point[1]) \ - .scale(1, self._aspect_ratio_correction) \ - .rotate_deg(self.angle) \ - .scale(1, 1 / self._aspect_ratio_correction) \ - .translate(*rotation_point) - - @property - def rotation_point(self): - """The rotation point of the patch.""" - return self._rotation_point - - @rotation_point.setter - def rotation_point(self, value): - if value in ['center', 'xy'] or ( - isinstance(value, tuple) and len(value) == 2 and - isinstance(value[0], Number) and isinstance(value[1], Number) - ): - self._rotation_point = value - else: - raise ValueError("`rotation_point` must be one of " - "{'xy', 'center', (number, number)}.") - - def get_x(self): - """Return the left coordinate of the rectangle.""" - return self._x0 - - def get_y(self): - """Return the bottom coordinate of the rectangle.""" - return self._y0 - - def get_xy(self): - """Return the left and bottom coords of the rectangle as a tuple.""" - return self._x0, self._y0 - - def get_corners(self): - """ - Return the corners of the rectangle, moving anti-clockwise from - (x0, y0). - """ - return self.get_patch_transform().transform( - [(0, 0), (1, 0), (1, 1), (0, 1)]) - - def get_center(self): - """Return the centre of the rectangle.""" - return self.get_patch_transform().transform((0.5, 0.5)) - - def get_width(self): - """Return the width of the rectangle.""" - return self._width - - def get_height(self): - """Return the height of the rectangle.""" - return self._height - - def get_angle(self): - """Get the rotation angle in degrees.""" - return self.angle - - def set_x(self, x): - """Set the left coordinate of the rectangle.""" - self._x0 = x - self.stale = True - - def set_y(self, y): - """Set the bottom coordinate of the rectangle.""" - self._y0 = y - self.stale = True - - def set_angle(self, angle): - """ - Set the rotation angle in degrees. - - The rotation is performed anti-clockwise around *xy*. - """ - self.angle = angle - self.stale = True - - def set_xy(self, xy): - """ - Set the left and bottom coordinates of the rectangle. - - Parameters - ---------- - xy : (float, float) - """ - self._x0, self._y0 = xy - self.stale = True - - def set_width(self, w): - """Set the width of the rectangle.""" - self._width = w - self.stale = True - - def set_height(self, h): - """Set the height of the rectangle.""" - self._height = h - self.stale = True - - def set_bounds(self, *args): - """ - Set the bounds of the rectangle as *left*, *bottom*, *width*, *height*. - - The values may be passed as separate parameters or as a tuple:: - - set_bounds(left, bottom, width, height) - set_bounds((left, bottom, width, height)) - - .. 
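A short sketch of the three `rotation_point` modes validated above, assuming a matplotlib version recent enough to support the parameter:

```python
# Sketch of the rotation_point options: anchor, centre, or arbitrary point.
from matplotlib.patches import Rectangle

about_xy = Rectangle((1, 1), 2, 1, angle=30)  # default: rotate around (1, 1)
about_center = Rectangle((1, 1), 2, 1, angle=30, rotation_point='center')
about_origin = Rectangle((1, 1), 2, 1, angle=30, rotation_point=(0, 0))
```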
ACCEPTS: (left, bottom, width, height) - """ - if len(args) == 1: - l, b, w, h = args[0] - else: - l, b, w, h = args - self._x0 = l - self._y0 = b - self._width = w - self._height = h - self.stale = True - - def get_bbox(self): - """Return the `.Bbox`.""" - x0, y0, x1, y1 = self._convert_units() - return transforms.Bbox.from_extents(x0, y0, x1, y1) - - xy = property(get_xy, set_xy) - - -class RegularPolygon(Patch): - """A regular polygon patch.""" - - def __str__(self): - s = "RegularPolygon((%g, %g), %d, radius=%g, orientation=%g)" - return s % (self.xy[0], self.xy[1], self.numvertices, self.radius, - self.orientation) - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="radius") - def __init__(self, xy, numVertices, radius=5, orientation=0, - **kwargs): - """ - Parameters - ---------- - xy : (float, float) - The center position. - - numVertices : int - The number of vertices. - - radius : float - The distance from the center to each of the vertices. - - orientation : float - The polygon rotation angle (in radians). - - **kwargs - `Patch` properties: - - %(Patch:kwdoc)s - """ - self.xy = xy - self.numvertices = numVertices - self.orientation = orientation - self.radius = radius - self._path = Path.unit_regular_polygon(numVertices) - self._patch_transform = transforms.Affine2D() - super().__init__(**kwargs) - - def get_path(self): - return self._path - - def get_patch_transform(self): - return self._patch_transform.clear() \ - .scale(self.radius) \ - .rotate(self.orientation) \ - .translate(*self.xy) - - -class PathPatch(Patch): - """A general polycurve path patch.""" - - _edge_default = True - - def __str__(self): - s = "PathPatch%d((%g, %g) ...)" - return s % (len(self._path.vertices), *tuple(self._path.vertices[0])) - - @_docstring.dedent_interpd - def __init__(self, path, **kwargs): - """ - *path* is a `.Path` object. - - Valid keyword arguments are: - - %(Patch:kwdoc)s - """ - super().__init__(**kwargs) - self._path = path - - def get_path(self): - return self._path - - def set_path(self, path): - self._path = path - - -class StepPatch(PathPatch): - """ - A path patch describing a stepwise constant function. - - By default, the path is not closed and starts and stops at - baseline value. - """ - - _edge_default = False - - @_docstring.dedent_interpd - def __init__(self, values, edges, *, - orientation='vertical', baseline=0, **kwargs): - """ - Parameters - ---------- - values : array-like - The step heights. - - edges : array-like - The edge positions, with ``len(edges) == len(vals) + 1``, - between which the curve takes on vals values. - - orientation : {'vertical', 'horizontal'}, default: 'vertical' - The direction of the steps. Vertical means that *values* are - along the y-axis, and edges are along the x-axis. - - baseline : float, array-like or None, default: 0 - The bottom value of the bounding edges or when - ``fill=True``, position of lower edge. If *fill* is - True or an array is passed to *baseline*, a closed - path is drawn. - - Other valid keyword arguments are: - - %(Patch:kwdoc)s - """ - self.orientation = orientation - self._edges = np.asarray(edges) - self._values = np.asarray(values) - self._baseline = np.asarray(baseline) if baseline is not None else None - self._update_path() - super().__init__(self._path, **kwargs) - - def _update_path(self): - if np.isnan(np.sum(self._edges)): - raise ValueError('Nan values in "edges" are disallowed') - if self._edges.size - 1 != self._values.size: - raise ValueError('Size mismatch between "values" and "edges". 
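A minimal sketch of the stairs invariant that the size check above enforces (`len(edges) == len(values) + 1`), assuming a matplotlib version that ships `StepPatch`:

```python
# Sketch of the invariant enforced by the size check above:
# there must be exactly one more edge than there are step values.
import numpy as np
from matplotlib.patches import StepPatch

values = np.array([1.0, 3.0, 2.0])
edges = np.array([0.0, 1.0, 2.0, 3.0])  # len(edges) == len(values) + 1
stairs = StepPatch(values, edges, baseline=0, facecolor='xkcd:pale green')
```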
' - "Expected `len(values) + 1 == len(edges)`, but " - f"`len(values) = {self._values.size}` and " - f"`len(edges) = {self._edges.size}`.") - # Initializing with empty arrays allows supporting empty stairs. - verts, codes = [np.empty((0, 2))], [np.empty(0, dtype=Path.code_type)] - - _nan_mask = np.isnan(self._values) - if self._baseline is not None: - _nan_mask |= np.isnan(self._baseline) - for idx0, idx1 in cbook.contiguous_regions(~_nan_mask): - x = np.repeat(self._edges[idx0:idx1+1], 2) - y = np.repeat(self._values[idx0:idx1], 2) - if self._baseline is None: - y = np.concatenate([y[:1], y, y[-1:]]) - elif self._baseline.ndim == 0: # single baseline value - y = np.concatenate([[self._baseline], y, [self._baseline]]) - elif self._baseline.ndim == 1: # baseline array - base = np.repeat(self._baseline[idx0:idx1], 2)[::-1] - x = np.concatenate([x, x[::-1]]) - y = np.concatenate([base[-1:], y, base[:1], - base[:1], base, base[-1:]]) - else: # no baseline - raise ValueError('Invalid `baseline` specified') - if self.orientation == 'vertical': - xy = np.column_stack([x, y]) - else: - xy = np.column_stack([y, x]) - verts.append(xy) - codes.append([Path.MOVETO] + [Path.LINETO]*(len(xy)-1)) - self._path = Path(np.concatenate(verts), np.concatenate(codes)) - - def get_data(self): - """Get `.StepPatch` values, edges and baseline as namedtuple.""" - StairData = namedtuple('StairData', 'values edges baseline') - return StairData(self._values, self._edges, self._baseline) - - def set_data(self, values=None, edges=None, baseline=None): - """ - Set `.StepPatch` values, edges and baseline. - - Parameters - ---------- - values : 1D array-like or None - Will not update values, if passing None - edges : 1D array-like, optional - baseline : float, 1D array-like or None - """ - if values is None and edges is None and baseline is None: - raise ValueError("Must set *values*, *edges* or *baseline*.") - if values is not None: - self._values = np.asarray(values) - if edges is not None: - self._edges = np.asarray(edges) - if baseline is not None: - self._baseline = np.asarray(baseline) - self._update_path() - self.stale = True - - -class Polygon(Patch): - """A general polygon patch.""" - - def __str__(self): - if len(self._path.vertices): - s = "Polygon%d((%g, %g) ...)" - return s % (len(self._path.vertices), *self._path.vertices[0]) - else: - return "Polygon0()" - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="closed") - def __init__(self, xy, closed=True, **kwargs): - """ - *xy* is a numpy array with shape Nx2. - - If *closed* is *True*, the polygon will be closed so the - starting and ending points are the same. - - Valid keyword arguments are: - - %(Patch:kwdoc)s - """ - super().__init__(**kwargs) - self._closed = closed - self.set_xy(xy) - - def get_path(self): - """Get the `.Path` of the polygon.""" - return self._path - - def get_closed(self): - """Return whether the polygon is closed.""" - return self._closed - - def set_closed(self, closed): - """ - Set whether the polygon is closed. - - Parameters - ---------- - closed : bool - True if the polygon is closed - """ - if self._closed == bool(closed): - return - self._closed = bool(closed) - self.set_xy(self.get_xy()) - self.stale = True - - def get_xy(self): - """ - Get the vertices of the path. - - Returns - ------- - (N, 2) numpy array - The coordinates of the vertices. - """ - return self._path.vertices - - def set_xy(self, xy): - """ - Set the vertices of the polygon. 
- - Parameters - ---------- - xy : (N, 2) array-like - The coordinates of the vertices. - - Notes - ----- - Unlike `.Path`, we do not ignore the last input vertex. If the - polygon is meant to be closed, and the last point of the polygon is not - equal to the first, we assume that the user has not explicitly passed a - ``CLOSEPOLY`` vertex, and add it ourselves. - """ - xy = np.asarray(xy) - nverts, _ = xy.shape - if self._closed: - # if the first and last vertex are the "same", then we assume that - # the user explicitly passed the CLOSEPOLY vertex. Otherwise, we - # have to append one since the last vertex will be "ignored" by - # Path - if nverts == 1 or nverts > 1 and (xy[0] != xy[-1]).any(): - xy = np.concatenate([xy, [xy[0]]]) - else: - # if we aren't closed, and the last vertex matches the first, then - # we assume we have an unnecessary CLOSEPOLY vertex and remove it - if nverts > 2 and (xy[0] == xy[-1]).all(): - xy = xy[:-1] - self._path = Path(xy, closed=self._closed) - self.stale = True - - xy = property(get_xy, set_xy, - doc='The vertices of the path as (N, 2) numpy array.') - - -class Wedge(Patch): - """Wedge shaped patch.""" - - def __str__(self): - pars = (self.center[0], self.center[1], self.r, - self.theta1, self.theta2, self.width) - fmt = "Wedge(center=(%g, %g), r=%g, theta1=%g, theta2=%g, width=%s)" - return fmt % pars - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="width") - def __init__(self, center, r, theta1, theta2, width=None, **kwargs): - """ - A wedge centered at *x*, *y* center with radius *r* that - sweeps *theta1* to *theta2* (in degrees). If *width* is given, - then a partial wedge is drawn from inner radius *r* - *width* - to outer radius *r*. - - Valid keyword arguments are: - - %(Patch:kwdoc)s - """ - super().__init__(**kwargs) - self.center = center - self.r, self.width = r, width - self.theta1, self.theta2 = theta1, theta2 - self._patch_transform = transforms.IdentityTransform() - self._recompute_path() - - def _recompute_path(self): - # Inner and outer rings are connected unless the annulus is complete - if abs((self.theta2 - self.theta1) - 360) <= 1e-12: - theta1, theta2 = 0, 360 - connector = Path.MOVETO - else: - theta1, theta2 = self.theta1, self.theta2 - connector = Path.LINETO - - # Form the outer ring - arc = Path.arc(theta1, theta2) - - if self.width is not None: - # Partial annulus needs to draw the outer ring - # followed by a reversed and scaled inner ring - v1 = arc.vertices - v2 = arc.vertices[::-1] * (self.r - self.width) / self.r - v = np.concatenate([v1, v2, [(0, 0)]]) - c = [*arc.codes, connector, *arc.codes[1:], Path.CLOSEPOLY] - else: - # Wedge doesn't need an inner ring - v = np.concatenate([arc.vertices, [(0, 0), (0, 0)]]) - c = [*arc.codes, connector, Path.CLOSEPOLY] - - # Shift and scale the wedge to the final location. 
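A minimal sketch of the closure rule in `Polygon.set_xy` above: with ``closed=True`` a missing closing vertex is appended automatically (standard matplotlib install assumed):

```python
# Sketch: when closed=True and the last vertex differs from the first,
# set_xy appends the first vertex so the path closes.
import numpy as np
from matplotlib.patches import Polygon

tri = Polygon(np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]]), closed=True)
print(tri.get_xy().shape)  # (4, 2): the first vertex is repeated at the end
```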
- self._path = Path(v * self.r + self.center, c) - - def set_center(self, center): - self._path = None - self.center = center - self.stale = True - - def set_radius(self, radius): - self._path = None - self.r = radius - self.stale = True - - def set_theta1(self, theta1): - self._path = None - self.theta1 = theta1 - self.stale = True - - def set_theta2(self, theta2): - self._path = None - self.theta2 = theta2 - self.stale = True - - def set_width(self, width): - self._path = None - self.width = width - self.stale = True - - def get_path(self): - if self._path is None: - self._recompute_path() - return self._path - - -# COVERAGE NOTE: Not used internally or from examples -class Arrow(Patch): - """An arrow patch.""" - - def __str__(self): - return "Arrow()" - - _path = Path._create_closed([ - [0.0, 0.1], [0.0, -0.1], [0.8, -0.1], [0.8, -0.3], [1.0, 0.0], - [0.8, 0.3], [0.8, 0.1]]) - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="width") - def __init__(self, x, y, dx, dy, width=1.0, **kwargs): - """ - Draws an arrow from (*x*, *y*) to (*x* + *dx*, *y* + *dy*). - The width of the arrow is scaled by *width*. - - Parameters - ---------- - x : float - x coordinate of the arrow tail. - y : float - y coordinate of the arrow tail. - dx : float - Arrow length in the x direction. - dy : float - Arrow length in the y direction. - width : float, default: 1 - Scale factor for the width of the arrow. With a default value of 1, - the tail width is 0.2 and head width is 0.6. - **kwargs - Keyword arguments control the `Patch` properties: - - %(Patch:kwdoc)s - - See Also - -------- - FancyArrow - Patch that allows independent control of the head and tail - properties. - """ - super().__init__(**kwargs) - self._patch_transform = ( - transforms.Affine2D() - .scale(np.hypot(dx, dy), width) - .rotate(np.arctan2(dy, dx)) - .translate(x, y) - .frozen()) - - def get_path(self): - return self._path - - def get_patch_transform(self): - return self._patch_transform - - -class FancyArrow(Polygon): - """ - Like Arrow, but lets you set head width and head height independently. - """ - - _edge_default = True - - def __str__(self): - return "FancyArrow()" - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="width") - def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False, - head_width=None, head_length=None, shape='full', overhang=0, - head_starts_at_zero=False, **kwargs): - """ - Parameters - ---------- - x, y : float - The x and y coordinates of the arrow base. - - dx, dy : float - The length of the arrow along x and y direction. - - width : float, default: 0.001 - Width of full arrow tail. - - length_includes_head : bool, default: False - True if head is to be counted in calculating the length. - - head_width : float or None, default: 3*width - Total width of the full arrow head. - - head_length : float or None, default: 1.5*head_width - Length of arrow head. - - shape : {'full', 'left', 'right'}, default: 'full' - Draw the left-half, right-half, or full arrow. - - overhang : float, default: 0 - Fraction that the arrow is swept back (0 overhang means - triangular shape). Can be negative or greater than one. - - head_starts_at_zero : bool, default: False - If True, the head starts being drawn at coordinate 0 - instead of ending at coordinate 0. 
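A short sketch of the two `Wedge` forms built by `_recompute_path` above, a plain sector and a partial annulus:

```python
# Sketch: without width a filled sector is drawn; with width, an annular
# wedge from inner radius r - width to outer radius r.
from matplotlib.patches import Wedge

sector = Wedge((0, 0), r=1.0, theta1=0, theta2=90)             # quarter disc
annular = Wedge((0, 0), r=1.0, theta1=0, theta2=90, width=0.3)  # ring, 0.7 to 1.0
```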
- - **kwargs - `.Patch` properties: - - %(Patch:kwdoc)s - """ - self._x = x - self._y = y - self._dx = dx - self._dy = dy - self._width = width - self._length_includes_head = length_includes_head - self._head_width = head_width - self._head_length = head_length - self._shape = shape - self._overhang = overhang - self._head_starts_at_zero = head_starts_at_zero - self._make_verts() - super().__init__(self.verts, closed=True, **kwargs) - - def set_data(self, *, x=None, y=None, dx=None, dy=None, width=None, - head_width=None, head_length=None): - """ - Set `.FancyArrow` x, y, dx, dy, width, head_with, and head_length. - Values left as None will not be updated. - - Parameters - ---------- - x, y : float or None, default: None - The x and y coordinates of the arrow base. - - dx, dy : float or None, default: None - The length of the arrow along x and y direction. - - width : float or None, default: None - Width of full arrow tail. - - head_width : float or None, default: None - Total width of the full arrow head. - - head_length : float or None, default: None - Length of arrow head. - """ - if x is not None: - self._x = x - if y is not None: - self._y = y - if dx is not None: - self._dx = dx - if dy is not None: - self._dy = dy - if width is not None: - self._width = width - if head_width is not None: - self._head_width = head_width - if head_length is not None: - self._head_length = head_length - self._make_verts() - self.set_xy(self.verts) - - def _make_verts(self): - if self._head_width is None: - head_width = 3 * self._width - else: - head_width = self._head_width - if self._head_length is None: - head_length = 1.5 * head_width - else: - head_length = self._head_length - - distance = np.hypot(self._dx, self._dy) - - if self._length_includes_head: - length = distance - else: - length = distance + head_length - if not length: - self.verts = np.empty([0, 2]) # display nothing if empty - else: - # start by drawing horizontal arrow, point at (0, 0) - hw, hl = head_width, head_length - hs, lw = self._overhang, self._width - left_half_arrow = np.array([ - [0.0, 0.0], # tip - [-hl, -hw / 2], # leftmost - [-hl * (1 - hs), -lw / 2], # meets stem - [-length, -lw / 2], # bottom left - [-length, 0], - ]) - # if we're not including the head, shift up by head length - if not self._length_includes_head: - left_half_arrow += [head_length, 0] - # if the head starts at 0, shift up by another head length - if self._head_starts_at_zero: - left_half_arrow += [head_length / 2, 0] - # figure out the shape, and complete accordingly - if self._shape == 'left': - coords = left_half_arrow - else: - right_half_arrow = left_half_arrow * [1, -1] - if self._shape == 'right': - coords = right_half_arrow - elif self._shape == 'full': - # The half-arrows contain the midpoint of the stem, - # which we can omit from the full arrow. Including it - # twice caused a problem with xpdf. 
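A minimal sketch contrasting `Arrow` and `FancyArrow` per the docstrings above; the numeric values are illustrative only:

```python
# Sketch: Arrow has a single width scale; FancyArrow exposes the head
# geometry directly, per the parameters documented above.
from matplotlib.patches import Arrow, FancyArrow

plain = Arrow(0, 0, 1, 1, width=0.5)
fancy = FancyArrow(0, 0, 1, 1, width=0.05,
                   head_width=0.2, head_length=0.3,
                   length_includes_head=True)  # total length == hypot(dx, dy)
```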
- coords = np.concatenate([left_half_arrow[:-1], - right_half_arrow[-2::-1]]) - else: - raise ValueError(f"Got unknown shape: {self._shape!r}") - if distance != 0: - cx = self._dx / distance - sx = self._dy / distance - else: - # Account for division by zero - cx, sx = 0, 1 - M = [[cx, sx], [-sx, cx]] - self.verts = np.dot(coords, M) + [ - self._x + self._dx, - self._y + self._dy, - ] - - -_docstring.interpd.update( - FancyArrow="\n".join( - (inspect.getdoc(FancyArrow.__init__) or "").splitlines()[2:])) - - -class CirclePolygon(RegularPolygon): - """A polygon-approximation of a circle patch.""" - - def __str__(self): - s = "CirclePolygon((%g, %g), radius=%g, resolution=%d)" - return s % (self.xy[0], self.xy[1], self.radius, self.numvertices) - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="resolution") - def __init__(self, xy, radius=5, - resolution=20, # the number of vertices - ** kwargs): - """ - Create a circle at *xy* = (*x*, *y*) with given *radius*. - - This circle is approximated by a regular polygon with *resolution* - sides. For a smoother circle drawn with splines, see `Circle`. - - Valid keyword arguments are: - - %(Patch:kwdoc)s - """ - super().__init__( - xy, resolution, radius=radius, orientation=0, **kwargs) - - -class Ellipse(Patch): - """A scale-free ellipse.""" - - def __str__(self): - pars = (self._center[0], self._center[1], - self.width, self.height, self.angle) - fmt = "Ellipse(xy=(%s, %s), width=%s, height=%s, angle=%s)" - return fmt % pars - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="angle") - def __init__(self, xy, width, height, angle=0, **kwargs): - """ - Parameters - ---------- - xy : (float, float) - xy coordinates of ellipse centre. - width : float - Total length (diameter) of horizontal axis. - height : float - Total length (diameter) of vertical axis. - angle : float, default: 0 - Rotation in degrees anti-clockwise. - - Notes - ----- - Valid keyword arguments are: - - %(Patch:kwdoc)s - """ - super().__init__(**kwargs) - - self._center = xy - self._width, self._height = width, height - self._angle = angle - self._path = Path.unit_circle() - # Required for EllipseSelector with axes aspect ratio != 1 - # The patch is defined in data coordinates and when changing the - # selector with square modifier and not in data coordinates, we need - # to correct for the aspect ratio difference between the data and - # display coordinate systems. - self._aspect_ratio_correction = 1.0 - # Note: This cannot be calculated until this is added to an Axes - self._patch_transform = transforms.IdentityTransform() - - def _recompute_transform(self): - """ - Notes - ----- - This cannot be called until after this has been added to an Axes, - otherwise unit conversion will fail. This makes it very important to - call the accessor method and not directly access the transformation - member variable. - """ - center = (self.convert_xunits(self._center[0]), - self.convert_yunits(self._center[1])) - width = self.convert_xunits(self._width) - height = self.convert_yunits(self._height) - self._patch_transform = transforms.Affine2D() \ - .scale(width * 0.5, height * 0.5 * self._aspect_ratio_correction) \ - .rotate_deg(self.angle) \ - .scale(1, 1 / self._aspect_ratio_correction) \ - .translate(*center) - - def get_path(self): - """Return the path of the ellipse.""" - return self._path - - def get_patch_transform(self): - self._recompute_transform() - return self._patch_transform - - def set_center(self, xy): - """ - Set the center of the ellipse. 
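A short sketch of the polygon-versus-spline distinction drawn above between `CirclePolygon` and `Circle`/`Ellipse`:

```python
# Sketch: CirclePolygon approximates with `resolution` straight sides,
# while Circle and Ellipse use Bezier splines.
from matplotlib.patches import Circle, CirclePolygon, Ellipse

coarse = CirclePolygon((0, 0), radius=1, resolution=8)  # visibly polygonal
smooth = Circle((0, 0), radius=1)                       # spline-based circle
tilted = Ellipse((0, 0), width=2, height=1, angle=30)
```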
- - Parameters - ---------- - xy : (float, float) - """ - self._center = xy - self.stale = True - - def get_center(self): - """Return the center of the ellipse.""" - return self._center - - center = property(get_center, set_center) - - def set_width(self, width): - """ - Set the width of the ellipse. - - Parameters - ---------- - width : float - """ - self._width = width - self.stale = True - - def get_width(self): - """ - Return the width of the ellipse. - """ - return self._width - - width = property(get_width, set_width) - - def set_height(self, height): - """ - Set the height of the ellipse. - - Parameters - ---------- - height : float - """ - self._height = height - self.stale = True - - def get_height(self): - """Return the height of the ellipse.""" - return self._height - - height = property(get_height, set_height) - - def set_angle(self, angle): - """ - Set the angle of the ellipse. - - Parameters - ---------- - angle : float - """ - self._angle = angle - self.stale = True - - def get_angle(self): - """Return the angle of the ellipse.""" - return self._angle - - angle = property(get_angle, set_angle) - - def get_corners(self): - """ - Return the corners of the ellipse bounding box. - - The bounding box orientation is moving anti-clockwise from the - lower left corner defined before rotation. - """ - return self.get_patch_transform().transform( - [(-1, -1), (1, -1), (1, 1), (-1, 1)]) - - -class Annulus(Patch): - """ - An elliptical annulus. - """ - - @_docstring.dedent_interpd - def __init__(self, xy, r, width, angle=0.0, **kwargs): - """ - Parameters - ---------- - xy : (float, float) - xy coordinates of annulus centre. - r : float or (float, float) - The radius, or semi-axes: - - - If float: radius of the outer circle. - - If two floats: semi-major and -minor axes of outer ellipse. - width : float - Width (thickness) of the annular ring. The width is measured inward - from the outer ellipse so that for the inner ellipse the semi-axes - are given by ``r - width``. *width* must be less than or equal to - the semi-minor axis. - angle : float, default: 0 - Rotation angle in degrees (anti-clockwise from the positive - x-axis). Ignored for circular annuli (i.e., if *r* is a scalar). - **kwargs - Keyword arguments control the `Patch` properties: - - %(Patch:kwdoc)s - """ - super().__init__(**kwargs) - - self.set_radii(r) - self.center = xy - self.width = width - self.angle = angle - self._path = None - - def __str__(self): - if self.a == self.b: - r = self.a - else: - r = (self.a, self.b) - - return "Annulus(xy=(%s, %s), r=%s, width=%s, angle=%s)" % \ - (*self.center, r, self.width, self.angle) - - def set_center(self, xy): - """ - Set the center of the annulus. - - Parameters - ---------- - xy : (float, float) - """ - self._center = xy - self._path = None - self.stale = True - - def get_center(self): - """Return the center of the annulus.""" - return self._center - - center = property(get_center, set_center) - - def set_width(self, width): - """ - Set the width (thickness) of the annulus ring. - - The width is measured inwards from the outer ellipse. 
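A minimal sketch of the `Annulus` radii and width rules above: a scalar *r* gives a circular annulus, a pair gives the outer semi-axes, and *width* must stay below the semi-minor axis:

```python
# Sketch of the radii rules in Annulus: scalar r -> circular annulus,
# a pair -> outer semi-axes; the width check above rejects widths that
# reach the semi-minor axis.
from matplotlib.patches import Annulus

circular = Annulus((0, 0), 1.0, 0.3)                   # inner radius 0.7
elliptic = Annulus((0, 0), (2.0, 1.0), 0.5, angle=45)  # width 0.5 < b == 1.0
```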
- - Parameters - ---------- - width : float - """ - if min(self.a, self.b) <= width: - raise ValueError( - 'Width of annulus must be less than or equal semi-minor axis') - - self._width = width - self._path = None - self.stale = True - - def get_width(self): - """Return the width (thickness) of the annulus ring.""" - return self._width - - width = property(get_width, set_width) - - def set_angle(self, angle): - """ - Set the tilt angle of the annulus. - - Parameters - ---------- - angle : float - """ - self._angle = angle - self._path = None - self.stale = True - - def get_angle(self): - """Return the angle of the annulus.""" - return self._angle - - angle = property(get_angle, set_angle) - - def set_semimajor(self, a): - """ - Set the semi-major axis *a* of the annulus. - - Parameters - ---------- - a : float - """ - self.a = float(a) - self._path = None - self.stale = True - - def set_semiminor(self, b): - """ - Set the semi-minor axis *b* of the annulus. - - Parameters - ---------- - b : float - """ - self.b = float(b) - self._path = None - self.stale = True - - def set_radii(self, r): - """ - Set the semi-major (*a*) and semi-minor radii (*b*) of the annulus. - - Parameters - ---------- - r : float or (float, float) - The radius, or semi-axes: - - - If float: radius of the outer circle. - - If two floats: semi-major and -minor axes of outer ellipse. - """ - if np.shape(r) == (2,): - self.a, self.b = r - elif np.shape(r) == (): - self.a = self.b = float(r) - else: - raise ValueError("Parameter 'r' must be one or two floats.") - - self._path = None - self.stale = True - - def get_radii(self): - """Return the semi-major and semi-minor radii of the annulus.""" - return self.a, self.b - - radii = property(get_radii, set_radii) - - def _transform_verts(self, verts, a, b): - return transforms.Affine2D() \ - .scale(*self._convert_xy_units((a, b))) \ - .rotate_deg(self.angle) \ - .translate(*self._convert_xy_units(self.center)) \ - .transform(verts) - - def _recompute_path(self): - # circular arc - arc = Path.arc(0, 360) - - # annulus needs to draw an outer ring - # followed by a reversed and scaled inner ring - a, b, w = self.a, self.b, self.width - v1 = self._transform_verts(arc.vertices, a, b) - v2 = self._transform_verts(arc.vertices[::-1], a - w, b - w) - v = np.vstack([v1, v2, v1[0, :], (0, 0)]) - c = np.hstack([arc.codes, Path.MOVETO, - arc.codes[1:], Path.MOVETO, - Path.CLOSEPOLY]) - self._path = Path(v, c) - - def get_path(self): - if self._path is None: - self._recompute_path() - return self._path - - -class Circle(Ellipse): - """ - A circle patch. - """ - def __str__(self): - pars = self.center[0], self.center[1], self.radius - fmt = "Circle(xy=(%g, %g), radius=%g)" - return fmt % pars - - @_docstring.dedent_interpd - def __init__(self, xy, radius=5, **kwargs): - """ - Create a true circle at center *xy* = (*x*, *y*) with given *radius*. - - Unlike `CirclePolygon` which is a polygonal approximation, this uses - Bezier splines and is much closer to a scale-free circle. - - Valid keyword arguments are: - - %(Patch:kwdoc)s - """ - super().__init__(xy, radius * 2, radius * 2, **kwargs) - self.radius = radius - - def set_radius(self, radius): - """ - Set the radius of the circle. - - Parameters - ---------- - radius : float - """ - self.width = self.height = 2 * radius - self.stale = True - - def get_radius(self): - """Return the radius of the circle.""" - return self.width / 2. - - radius = property(get_radius, set_radius) - - -class Arc(Ellipse): - """ - An elliptical arc, i.e. 
a segment of an ellipse. - - Due to internal optimizations, the arc cannot be filled. - """ - - def __str__(self): - pars = (self.center[0], self.center[1], self.width, - self.height, self.angle, self.theta1, self.theta2) - fmt = ("Arc(xy=(%g, %g), width=%g, " - "height=%g, angle=%g, theta1=%g, theta2=%g)") - return fmt % pars - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="angle") - def __init__(self, xy, width, height, angle=0.0, - theta1=0.0, theta2=360.0, **kwargs): - """ - Parameters - ---------- - xy : (float, float) - The center of the ellipse. - - width : float - The length of the horizontal axis. - - height : float - The length of the vertical axis. - - angle : float - Rotation of the ellipse in degrees (counterclockwise). - - theta1, theta2 : float, default: 0, 360 - Starting and ending angles of the arc in degrees. These values - are relative to *angle*, e.g. if *angle* = 45 and *theta1* = 90 - the absolute starting angle is 135. - Default *theta1* = 0, *theta2* = 360, i.e. a complete ellipse. - The arc is drawn in the counterclockwise direction. - Angles greater than or equal to 360, or smaller than 0, are - represented by an equivalent angle in the range [0, 360), by - taking the input value mod 360. - - Other Parameters - ---------------- - **kwargs : `.Patch` properties - Most `.Patch` properties are supported as keyword arguments, - except *fill* and *facecolor* because filling is not supported. - - %(Patch:kwdoc)s - """ - fill = kwargs.setdefault('fill', False) - if fill: - raise ValueError("Arc objects can not be filled") - - super().__init__(xy, width, height, angle=angle, **kwargs) - - self.theta1 = theta1 - self.theta2 = theta2 - (self._theta1, self._theta2, self._stretched_width, - self._stretched_height) = self._theta_stretch() - self._path = Path.arc(self._theta1, self._theta2) - - @artist.allow_rasterization - def draw(self, renderer): - """ - Draw the arc to the given *renderer*. - - Notes - ----- - Ellipses are normally drawn using an approximation that uses - eight cubic Bezier splines. The error of this approximation - is 1.89818e-6, according to this unverified source: - - Lancaster, Don. *Approximating a Circle or an Ellipse Using - Four Bezier Cubic Splines.* - - https://www.tinaja.com/glib/ellipse4.pdf - - There is a use case where very large ellipses must be drawn - with very high accuracy, and it is too expensive to render the - entire ellipse with enough segments (either splines or line - segments). Therefore, in the case where either radius of the - ellipse is large enough that the error of the spline - approximation will be visible (greater than one pixel offset - from the ideal), a different technique is used. - - In that case, only the visible parts of the ellipse are drawn, - with each visible arc using a fixed number of spline segments - (8). The algorithm proceeds as follows: - - 1. The points where the ellipse intersects the axes (or figure) - bounding box are located. (This is done by performing an inverse - transformation on the bbox such that it is relative to the unit - circle -- this makes the intersection calculation much easier than - doing rotated ellipse intersection directly.) - - This uses the "line intersecting a circle" algorithm from: - - Vince, John. *Geometry for Computer Graphics: Formulae, - Examples & Proofs.* London: Springer-Verlag, 2005. - - 2. The angles of each of the intersection points are calculated. - - 3. 
Proceeding counterclockwise starting in the positive - x-direction, each of the visible arc-segments between the - pairs of vertices are drawn using the Bezier arc - approximation technique implemented in `.Path.arc`. - """ - if not self.get_visible(): - return - - self._recompute_transform() - - self._update_path() - # Get width and height in pixels we need to use - # `self.get_data_transform` rather than `self.get_transform` - # because we want the transform from dataspace to the - # screen space to estimate how big the arc will be in physical - # units when rendered (the transform that we get via - # `self.get_transform()` goes from an idealized unit-radius - # space to screen space). - data_to_screen_trans = self.get_data_transform() - pwidth, pheight = ( - data_to_screen_trans.transform((self._stretched_width, - self._stretched_height)) - - data_to_screen_trans.transform((0, 0))) - inv_error = (1.0 / 1.89818e-6) * 0.5 - - if pwidth < inv_error and pheight < inv_error: - return Patch.draw(self, renderer) - - def line_circle_intersect(x0, y0, x1, y1): - dx = x1 - x0 - dy = y1 - y0 - dr2 = dx * dx + dy * dy - D = x0 * y1 - x1 * y0 - D2 = D * D - discrim = dr2 - D2 - if discrim >= 0.0: - sign_dy = np.copysign(1, dy) # +/-1, never 0. - sqrt_discrim = np.sqrt(discrim) - return np.array( - [[(D * dy + sign_dy * dx * sqrt_discrim) / dr2, - (-D * dx + abs(dy) * sqrt_discrim) / dr2], - [(D * dy - sign_dy * dx * sqrt_discrim) / dr2, - (-D * dx - abs(dy) * sqrt_discrim) / dr2]]) - else: - return np.empty((0, 2)) - - def segment_circle_intersect(x0, y0, x1, y1): - epsilon = 1e-9 - if x1 < x0: - x0e, x1e = x1, x0 - else: - x0e, x1e = x0, x1 - if y1 < y0: - y0e, y1e = y1, y0 - else: - y0e, y1e = y0, y1 - xys = line_circle_intersect(x0, y0, x1, y1) - xs, ys = xys.T - return xys[ - (x0e - epsilon < xs) & (xs < x1e + epsilon) - & (y0e - epsilon < ys) & (ys < y1e + epsilon) - ] - - # Transform the axes (or figure) box_path so that it is relative to - # the unit circle in the same way that it is relative to the desired - # ellipse. - box_path_transform = ( - transforms.BboxTransformTo((self.axes or self.figure).bbox) - - self.get_transform()) - box_path = Path.unit_rectangle().transformed(box_path_transform) - - thetas = set() - # For each of the point pairs, there is a line segment - for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]): - xy = segment_circle_intersect(*p0, *p1) - x, y = xy.T - # arctan2 return [-pi, pi), the rest of our angles are in - # [0, 360], adjust as needed. 
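-            # (e.g. an intersection angle of -90 degrees becomes 270, making
-            # it directly comparable with self._theta1/self._theta2 below)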
- theta = (np.rad2deg(np.arctan2(y, x)) + 360) % 360 - thetas.update( - theta[(self._theta1 < theta) & (theta < self._theta2)]) - thetas = sorted(thetas) + [self._theta2] - last_theta = self._theta1 - theta1_rad = np.deg2rad(self._theta1) - inside = box_path.contains_point( - (np.cos(theta1_rad), np.sin(theta1_rad)) - ) - - # save original path - path_original = self._path - for theta in thetas: - if inside: - self._path = Path.arc(last_theta, theta, 8) - Patch.draw(self, renderer) - inside = False - else: - inside = True - last_theta = theta - - # restore original path - self._path = path_original - - def _update_path(self): - # Compute new values and update and set new _path if any value changed - stretched = self._theta_stretch() - if any(a != b for a, b in zip( - stretched, (self._theta1, self._theta2, self._stretched_width, - self._stretched_height))): - (self._theta1, self._theta2, self._stretched_width, - self._stretched_height) = stretched - self._path = Path.arc(self._theta1, self._theta2) - - def _theta_stretch(self): - # If the width and height of ellipse are not equal, take into account - # stretching when calculating angles to draw between - def theta_stretch(theta, scale): - theta = np.deg2rad(theta) - x = np.cos(theta) - y = np.sin(theta) - stheta = np.rad2deg(np.arctan2(scale * y, x)) - # arctan2 has the range [-pi, pi], we expect [0, 2*pi] - return (stheta + 360) % 360 - - width = self.convert_xunits(self.width) - height = self.convert_yunits(self.height) - if ( - # if we need to stretch the angles because we are distorted - width != height - # and we are not doing a full circle. - # - # 0 and 360 do not exactly round-trip through the angle - # stretching (due to both float precision limitations and - # the difference between the range of arctan2 [-pi, pi] and - # this method [0, 360]) so avoid doing it if we don't have to. - and not (self.theta1 != self.theta2 and - self.theta1 % 360 == self.theta2 % 360) - ): - theta1 = theta_stretch(self.theta1, width / height) - theta2 = theta_stretch(self.theta2, width / height) - return theta1, theta2, width, height - return self.theta1, self.theta2, width, height - - -def bbox_artist(artist, renderer, props=None, fill=True): - """ - A debug function to draw a rectangle around the bounding - box returned by an artist's `.Artist.get_window_extent` - to test whether the artist is returning the correct bbox. - - *props* is a dict of rectangle props with the additional property - 'pad' that sets the padding around the bbox in points. - """ - if props is None: - props = {} - props = props.copy() # don't want to alter the pad externally - pad = props.pop('pad', 4) - pad = renderer.points_to_pixels(pad) - bbox = artist.get_window_extent(renderer) - r = Rectangle( - xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2), - width=bbox.width + pad, height=bbox.height + pad, - fill=fill, transform=transforms.IdentityTransform(), clip_on=False) - r.update(props) - r.draw(renderer) - - -def draw_bbox(bbox, renderer, color='k', trans=None): - """ - A debug function to draw a rectangle around the bounding - box returned by an artist's `.Artist.get_window_extent` - to test whether the artist is returning the correct bbox. - """ - r = Rectangle(xy=bbox.p0, width=bbox.width, height=bbox.height, - edgecolor=color, fill=False, clip_on=False) - if trans is not None: - r.set_transform(trans) - r.draw(renderer) - - -class _Style: - """ - A base class for the Styles. 
It is meant to be a container class, - where actual styles are declared as subclass of it, and it - provides some helper functions. - """ - - def __init_subclass__(cls): - # Automatically perform docstring interpolation on the subclasses: - # This allows listing the supported styles via - # - %(BoxStyle:table)s - # - %(ConnectionStyle:table)s - # - %(ArrowStyle:table)s - # and additionally adding .. ACCEPTS: blocks via - # - %(BoxStyle:table_and_accepts)s - # - %(ConnectionStyle:table_and_accepts)s - # - %(ArrowStyle:table_and_accepts)s - _docstring.interpd.update({ - f"{cls.__name__}:table": cls.pprint_styles(), - f"{cls.__name__}:table_and_accepts": ( - cls.pprint_styles() - + "\n\n .. ACCEPTS: [" - + "|".join(map(" '{}' ".format, cls._style_list)) - + "]") - }) - - def __new__(cls, stylename, **kwargs): - """Return the instance of the subclass with the given style name.""" - # The "class" should have the _style_list attribute, which is a mapping - # of style names to style classes. - _list = stylename.replace(" ", "").split(",") - _name = _list[0].lower() - try: - _cls = cls._style_list[_name] - except KeyError as err: - raise ValueError(f"Unknown style: {stylename!r}") from err - try: - _args_pair = [cs.split("=") for cs in _list[1:]] - _args = {k: float(v) for k, v in _args_pair} - except ValueError as err: - raise ValueError( - f"Incorrect style argument: {stylename!r}") from err - return _cls(**{**_args, **kwargs}) - - @classmethod - def get_styles(cls): - """Return a dictionary of available styles.""" - return cls._style_list - - @classmethod - def pprint_styles(cls): - """Return the available styles as pretty-printed string.""" - table = [('Class', 'Name', 'Attrs'), - *[(cls.__name__, - # Add backquotes, as - and | have special meaning in reST. - f'``{name}``', - # [1:-1] drops the surrounding parentheses. - str(inspect.signature(cls))[1:-1] or 'None') - for name, cls in cls._style_list.items()]] - # Convert to rst table. - col_len = [max(len(cell) for cell in column) for column in zip(*table)] - table_formatstr = ' '.join('=' * cl for cl in col_len) - rst_table = '\n'.join([ - '', - table_formatstr, - ' '.join(cell.ljust(cl) for cell, cl in zip(table[0], col_len)), - table_formatstr, - *[' '.join(cell.ljust(cl) for cell, cl in zip(row, col_len)) - for row in table[1:]], - table_formatstr, - ]) - return textwrap.indent(rst_table, prefix=' ' * 4) - - @classmethod - def register(cls, name, style): - """Register a new style.""" - if not issubclass(style, cls._Base): - raise ValueError("%s must be a subclass of %s" % (style, - cls._Base)) - cls._style_list[name] = style - - -def _register_style(style_list, cls=None, *, name=None): - """Class decorator that stashes a class in a (style) dictionary.""" - if cls is None: - return functools.partial(_register_style, style_list, name=name) - style_list[name or cls.__name__.lower()] = cls - return cls - - -@_docstring.dedent_interpd -class BoxStyle(_Style): - """ - `BoxStyle` is a container class which defines several - boxstyle classes, which are used for `FancyBboxPatch`. - - A style object can be created as:: - - BoxStyle.Round(pad=0.2) - - or:: - - BoxStyle("Round", pad=0.2) - - or:: - - BoxStyle("Round, pad=0.2") - - The following boxstyle classes are defined. 
- - %(BoxStyle:table)s - - An instance of a boxstyle class is a callable object, with the signature :: - - __call__(self, x0, y0, width, height, mutation_size) -> Path - - *x0*, *y0*, *width* and *height* specify the location and size of the box - to be drawn; *mutation_size* scales the outline properties such as padding. - """ - - _style_list = {} - - @_register_style(_style_list) - class Square: - """A square box.""" - - def __init__(self, pad=0.3): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - """ - self.pad = pad - - def __call__(self, x0, y0, width, height, mutation_size): - pad = mutation_size * self.pad - # width and height with padding added. - width, height = width + 2 * pad, height + 2 * pad - # boundary of the padded box - x0, y0 = x0 - pad, y0 - pad - x1, y1 = x0 + width, y0 + height - return Path._create_closed( - [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]) - - @_register_style(_style_list) - class Circle: - """A circular box.""" - - def __init__(self, pad=0.3): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - """ - self.pad = pad - - def __call__(self, x0, y0, width, height, mutation_size): - pad = mutation_size * self.pad - width, height = width + 2 * pad, height + 2 * pad - # boundary of the padded box - x0, y0 = x0 - pad, y0 - pad - return Path.circle((x0 + width / 2, y0 + height / 2), - max(width, height) / 2) - - @_register_style(_style_list) - class Ellipse: - """ - An elliptical box. - - .. versionadded:: 3.7 - """ - - def __init__(self, pad=0.3): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - """ - self.pad = pad - - def __call__(self, x0, y0, width, height, mutation_size): - pad = mutation_size * self.pad - width, height = width + 2 * pad, height + 2 * pad - # boundary of the padded box - x0, y0 = x0 - pad, y0 - pad - a = width / math.sqrt(2) - b = height / math.sqrt(2) - trans = Affine2D().scale(a, b).translate(x0 + width / 2, - y0 + height / 2) - return trans.transform_path(Path.unit_circle()) - - @_register_style(_style_list) - class LArrow: - """A box in the shape of a left-pointing arrow.""" - - def __init__(self, pad=0.3): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - """ - self.pad = pad - - def __call__(self, x0, y0, width, height, mutation_size): - # padding - pad = mutation_size * self.pad - # width and height with padding added. - width, height = width + 2 * pad, height + 2 * pad - # boundary of the padded box - x0, y0 = x0 - pad, y0 - pad, - x1, y1 = x0 + width, y0 + height - - dx = (y1 - y0) / 2 - dxx = dx / 2 - x0 = x0 + pad / 1.4 # adjust by ~sqrt(2) - - return Path._create_closed( - [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1), - (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx), - (x0 + dxx, y0 - dxx), # arrow - (x0 + dxx, y0)]) - - @_register_style(_style_list) - class RArrow(LArrow): - """A box in the shape of a right-pointing arrow.""" - - def __call__(self, x0, y0, width, height, mutation_size): - p = BoxStyle.LArrow.__call__( - self, x0, y0, width, height, mutation_size) - p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0] - return p - - @_register_style(_style_list) - class DArrow: - """A box in the shape of a two-way arrow.""" - # Modified from LArrow to add a right arrow to the bbox. 
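-    # The left arrow-head is built as in LArrow; the right arrow-head
-    # re-uses the same dx/dxx offsets on the right edge of the box.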
- - def __init__(self, pad=0.3): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - """ - self.pad = pad - - def __call__(self, x0, y0, width, height, mutation_size): - # padding - pad = mutation_size * self.pad - # width and height with padding added. - # The width is padded by the arrows, so we don't need to pad it. - height = height + 2 * pad - # boundary of the padded box - x0, y0 = x0 - pad, y0 - pad - x1, y1 = x0 + width, y0 + height - - dx = (y1 - y0) / 2 - dxx = dx / 2 - x0 = x0 + pad / 1.4 # adjust by ~sqrt(2) - - return Path._create_closed([ - (x0 + dxx, y0), (x1, y0), # bot-segment - (x1, y0 - dxx), (x1 + dx + dxx, y0 + dx), - (x1, y1 + dxx), # right-arrow - (x1, y1), (x0 + dxx, y1), # top-segment - (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx), - (x0 + dxx, y0 - dxx), # left-arrow - (x0 + dxx, y0)]) - - @_register_style(_style_list) - class Round: - """A box with round corners.""" - - def __init__(self, pad=0.3, rounding_size=None): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - rounding_size : float, default: *pad* - Radius of the corners. - """ - self.pad = pad - self.rounding_size = rounding_size - - def __call__(self, x0, y0, width, height, mutation_size): - - # padding - pad = mutation_size * self.pad - - # size of the rounding corner - if self.rounding_size: - dr = mutation_size * self.rounding_size - else: - dr = pad - - width, height = width + 2 * pad, height + 2 * pad - - x0, y0 = x0 - pad, y0 - pad, - x1, y1 = x0 + width, y0 + height - - # Round corners are implemented as quadratic Bezier, e.g., - # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner. - cp = [(x0 + dr, y0), - (x1 - dr, y0), - (x1, y0), (x1, y0 + dr), - (x1, y1 - dr), - (x1, y1), (x1 - dr, y1), - (x0 + dr, y1), - (x0, y1), (x0, y1 - dr), - (x0, y0 + dr), - (x0, y0), (x0 + dr, y0), - (x0 + dr, y0)] - - com = [Path.MOVETO, - Path.LINETO, - Path.CURVE3, Path.CURVE3, - Path.LINETO, - Path.CURVE3, Path.CURVE3, - Path.LINETO, - Path.CURVE3, Path.CURVE3, - Path.LINETO, - Path.CURVE3, Path.CURVE3, - Path.CLOSEPOLY] - - return Path(cp, com) - - @_register_style(_style_list) - class Round4: - """A box with rounded edges.""" - - def __init__(self, pad=0.3, rounding_size=None): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - rounding_size : float, default: *pad*/2 - Rounding of edges. - """ - self.pad = pad - self.rounding_size = rounding_size - - def __call__(self, x0, y0, width, height, mutation_size): - - # padding - pad = mutation_size * self.pad - - # Rounding size; defaults to half of the padding. - if self.rounding_size: - dr = mutation_size * self.rounding_size - else: - dr = pad / 2. 
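-            # (dr is also how far the CURVE4 control points below overshoot
-            # the shrunken box, so the curved outline spans the padded size)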
- - width = width + 2 * pad - 2 * dr - height = height + 2 * pad - 2 * dr - - x0, y0 = x0 - pad + dr, y0 - pad + dr, - x1, y1 = x0 + width, y0 + height - - cp = [(x0, y0), - (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0), - (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1), - (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1), - (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0), - (x0, y0)] - - com = [Path.MOVETO, - Path.CURVE4, Path.CURVE4, Path.CURVE4, - Path.CURVE4, Path.CURVE4, Path.CURVE4, - Path.CURVE4, Path.CURVE4, Path.CURVE4, - Path.CURVE4, Path.CURVE4, Path.CURVE4, - Path.CLOSEPOLY] - - return Path(cp, com) - - @_register_style(_style_list) - class Sawtooth: - """A box with a sawtooth outline.""" - - def __init__(self, pad=0.3, tooth_size=None): - """ - Parameters - ---------- - pad : float, default: 0.3 - The amount of padding around the original box. - tooth_size : float, default: *pad*/2 - Size of the sawtooth. - """ - self.pad = pad - self.tooth_size = tooth_size - - def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size): - - # padding - pad = mutation_size * self.pad - - # size of sawtooth - if self.tooth_size is None: - tooth_size = self.pad * .5 * mutation_size - else: - tooth_size = self.tooth_size * mutation_size - - hsz = tooth_size / 2 - width = width + 2 * pad - tooth_size - height = height + 2 * pad - tooth_size - - # the sizes of the vertical and horizontal sawtooth are - # separately adjusted to fit the given box size. - dsx_n = round((width - tooth_size) / (tooth_size * 2)) * 2 - dsx = (width - tooth_size) / dsx_n - dsy_n = round((height - tooth_size) / (tooth_size * 2)) * 2 - dsy = (height - tooth_size) / dsy_n - - x0, y0 = x0 - pad + hsz, y0 - pad + hsz - x1, y1 = x0 + width, y0 + height - - xs = [ - x0, *np.linspace(x0 + hsz, x1 - hsz, 2 * dsx_n + 1), # bottom - *([x1, x1 + hsz, x1, x1 - hsz] * dsy_n)[:2*dsy_n+2], # right - x1, *np.linspace(x1 - hsz, x0 + hsz, 2 * dsx_n + 1), # top - *([x0, x0 - hsz, x0, x0 + hsz] * dsy_n)[:2*dsy_n+2], # left - ] - ys = [ - *([y0, y0 - hsz, y0, y0 + hsz] * dsx_n)[:2*dsx_n+2], # bottom - y0, *np.linspace(y0 + hsz, y1 - hsz, 2 * dsy_n + 1), # right - *([y1, y1 + hsz, y1, y1 - hsz] * dsx_n)[:2*dsx_n+2], # top - y1, *np.linspace(y1 - hsz, y0 + hsz, 2 * dsy_n + 1), # left - ] - - return [*zip(xs, ys), (xs[0], ys[0])] - - def __call__(self, x0, y0, width, height, mutation_size): - saw_vertices = self._get_sawtooth_vertices(x0, y0, width, - height, mutation_size) - return Path(saw_vertices, closed=True) - - @_register_style(_style_list) - class Roundtooth(Sawtooth): - """A box with a rounded sawtooth outline.""" - - def __call__(self, x0, y0, width, height, mutation_size): - saw_vertices = self._get_sawtooth_vertices(x0, y0, - width, height, - mutation_size) - # Add a trailing vertex to allow us to close the polygon correctly - saw_vertices = np.concatenate([saw_vertices, [saw_vertices[0]]]) - codes = ([Path.MOVETO] + - [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) + - [Path.CLOSEPOLY]) - return Path(saw_vertices, codes) - - -@_docstring.dedent_interpd -class ConnectionStyle(_Style): - """ - `ConnectionStyle` is a container class which defines - several connectionstyle classes, which is used to create a path - between two points. These are mainly used with `FancyArrowPatch`. 
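-
-    For example (an illustrative sketch; ``ax`` is assumed to be an
-    existing `~matplotlib.axes.Axes`)::
-
-        arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
-                                connectionstyle="arc3,rad=0.3",
-                                arrowstyle="->", mutation_scale=20)
-        ax.add_patch(arrow)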
-
-    A connectionstyle object can be either created as::
-
-        ConnectionStyle.Arc3(rad=0.2)
-
-    or::
-
-        ConnectionStyle("Arc3", rad=0.2)
-
-    or::
-
-        ConnectionStyle("Arc3, rad=0.2")
-
-    The following classes are defined:
-
-    %(ConnectionStyle:table)s
-
-    An instance of any connection style class is a callable object,
-    whose call signature is::
-
-        __call__(self, posA, posB,
-                 patchA=None, patchB=None,
-                 shrinkA=2., shrinkB=2.)
-
-    and it returns a `.Path` instance. *posA* and *posB* are
-    tuples of (x, y) coordinates of the two points to be
-    connected. If *patchA* (or *patchB*) is given, the returned path is
-    clipped so that it starts (or ends) at the boundary of the
-    patch. The path is further shrunk by *shrinkA* (or *shrinkB*),
-    which is given in points.
-    """
-
-    _style_list = {}
-
-    class _Base:
-        """
-        A base class for connectionstyle classes. The subclass needs
-        to implement a *connect* method whose call signature is::
-
-            connect(posA, posB)
-
-        where posA and posB are tuples of x, y coordinates to be
-        connected. The method needs to return a path connecting two
-        points. This base class defines a __call__ method, and a few
-        helper methods.
-        """
-
-        @_api.deprecated("3.7")
-        class SimpleEvent:
-            def __init__(self, xy):
-                self.x, self.y = xy
-
-        def _in_patch(self, patch):
-            """
-            Return a predicate function testing whether a point *xy* is
-            contained in *patch*.
-            """
-            return lambda xy: patch.contains(
-                SimpleNamespace(x=xy[0], y=xy[1]))[0]
-
-        def _clip(self, path, in_start, in_stop):
-            """
-            Clip *path* at its start by the region where *in_start* returns
-            True, and at its stop by the region where *in_stop* returns True.
-
-            The original path is assumed to start in the *in_start* region and
-            to stop in the *in_stop* region.
-            """
-            if in_start:
-                try:
-                    _, path = split_path_inout(path, in_start)
-                except ValueError:
-                    pass
-            if in_stop:
-                try:
-                    path, _ = split_path_inout(path, in_stop)
-                except ValueError:
-                    pass
-            return path
-
-        def __call__(self, posA, posB,
-                     shrinkA=2., shrinkB=2., patchA=None, patchB=None):
-            """
-            Call the *connect* method to create a path between *posA* and
-            *posB*; then clip and shrink the path.
-            """
-            path = self.connect(posA, posB)
-            path = self._clip(
-                path,
-                self._in_patch(patchA) if patchA else None,
-                self._in_patch(patchB) if patchB else None,
-            )
-            path = self._clip(
-                path,
-                inside_circle(*path.vertices[0], shrinkA) if shrinkA else None,
-                inside_circle(*path.vertices[-1], shrinkB) if shrinkB else None
-            )
-            return path
-
-    @_register_style(_style_list)
-    class Arc3(_Base):
-        """
-        Creates a simple quadratic Bézier curve between two
-        points. The curve is created so that the middle control point
-        (C1) is located at the same distance from the start (C0) and
-        end point (C2) and the distance of C1 to the line
-        connecting C0-C2 is *rad* times the distance of C0-C2.
-        """
-
-        def __init__(self, rad=0.):
-            """
-            Parameters
-            ----------
-            rad : float
-                Curvature of the curve.
-            """
-            self.rad = rad
-
-        def connect(self, posA, posB):
-            x1, y1 = posA
-            x2, y2 = posB
-            x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
-            dx, dy = x2 - x1, y2 - y1
-
-            f = self.rad
-
-            cx, cy = x12 + f * dy, y12 - f * dx
-
-            vertices = [(x1, y1),
-                        (cx, cy),
-                        (x2, y2)]
-            codes = [Path.MOVETO,
-                     Path.CURVE3,
-                     Path.CURVE3]
-
-            return Path(vertices, codes)
-
-    @_register_style(_style_list)
-    class Angle3(_Base):
-        """
-        Creates a simple quadratic Bézier curve between two points.
The middle - control point is placed at the intersecting point of two lines which - cross the start and end point, and have a slope of *angleA* and - *angleB*, respectively. - """ - - def __init__(self, angleA=90, angleB=0): - """ - Parameters - ---------- - angleA : float - Starting angle of the path. - - angleB : float - Ending angle of the path. - """ - - self.angleA = angleA - self.angleB = angleB - - def connect(self, posA, posB): - x1, y1 = posA - x2, y2 = posB - - cosA = math.cos(math.radians(self.angleA)) - sinA = math.sin(math.radians(self.angleA)) - cosB = math.cos(math.radians(self.angleB)) - sinB = math.sin(math.radians(self.angleB)) - - cx, cy = get_intersection(x1, y1, cosA, sinA, - x2, y2, cosB, sinB) - - vertices = [(x1, y1), (cx, cy), (x2, y2)] - codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3] - - return Path(vertices, codes) - - @_register_style(_style_list) - class Angle(_Base): - """ - Creates a piecewise continuous quadratic Bézier path between two - points. The path has a one passing-through point placed at the - intersecting point of two lines which cross the start and end point, - and have a slope of *angleA* and *angleB*, respectively. - The connecting edges are rounded with *rad*. - """ - - def __init__(self, angleA=90, angleB=0, rad=0.): - """ - Parameters - ---------- - angleA : float - Starting angle of the path. - - angleB : float - Ending angle of the path. - - rad : float - Rounding radius of the edge. - """ - - self.angleA = angleA - self.angleB = angleB - - self.rad = rad - - def connect(self, posA, posB): - x1, y1 = posA - x2, y2 = posB - - cosA = math.cos(math.radians(self.angleA)) - sinA = math.sin(math.radians(self.angleA)) - cosB = math.cos(math.radians(self.angleB)) - sinB = math.sin(math.radians(self.angleB)) - - cx, cy = get_intersection(x1, y1, cosA, sinA, - x2, y2, cosB, sinB) - - vertices = [(x1, y1)] - codes = [Path.MOVETO] - - if self.rad == 0.: - vertices.append((cx, cy)) - codes.append(Path.LINETO) - else: - dx1, dy1 = x1 - cx, y1 - cy - d1 = np.hypot(dx1, dy1) - f1 = self.rad / d1 - dx2, dy2 = x2 - cx, y2 - cy - d2 = np.hypot(dx2, dy2) - f2 = self.rad / d2 - vertices.extend([(cx + dx1 * f1, cy + dy1 * f1), - (cx, cy), - (cx + dx2 * f2, cy + dy2 * f2)]) - codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3]) - - vertices.append((x2, y2)) - codes.append(Path.LINETO) - - return Path(vertices, codes) - - @_register_style(_style_list) - class Arc(_Base): - """ - Creates a piecewise continuous quadratic Bézier path between two - points. The path can have two passing-through points, a - point placed at the distance of *armA* and angle of *angleA* from - point A, another point with respect to point B. The edges are - rounded with *rad*. - """ - - def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.): - """ - Parameters - ---------- - angleA : float - Starting angle of the path. - - angleB : float - Ending angle of the path. - - armA : float or None - Length of the starting arm. - - armB : float or None - Length of the ending arm. - - rad : float - Rounding radius of the edges. 
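-
-            Examples
-            --------
-            An illustrative sketch: 30-point arms leaving the end points at
-            90 and 0 degrees, with elbows rounded by 5 points::
-
-                ConnectionStyle("arc", angleA=90, angleB=0,
-                                armA=30, armB=30, rad=5)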
- """ - - self.angleA = angleA - self.angleB = angleB - self.armA = armA - self.armB = armB - - self.rad = rad - - def connect(self, posA, posB): - x1, y1 = posA - x2, y2 = posB - - vertices = [(x1, y1)] - rounded = [] - codes = [Path.MOVETO] - - if self.armA: - cosA = math.cos(math.radians(self.angleA)) - sinA = math.sin(math.radians(self.angleA)) - # x_armA, y_armB - d = self.armA - self.rad - rounded.append((x1 + d * cosA, y1 + d * sinA)) - d = self.armA - rounded.append((x1 + d * cosA, y1 + d * sinA)) - - if self.armB: - cosB = math.cos(math.radians(self.angleB)) - sinB = math.sin(math.radians(self.angleB)) - x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB - - if rounded: - xp, yp = rounded[-1] - dx, dy = x_armB - xp, y_armB - yp - dd = (dx * dx + dy * dy) ** .5 - - rounded.append((xp + self.rad * dx / dd, - yp + self.rad * dy / dd)) - vertices.extend(rounded) - codes.extend([Path.LINETO, - Path.CURVE3, - Path.CURVE3]) - else: - xp, yp = vertices[-1] - dx, dy = x_armB - xp, y_armB - yp - dd = (dx * dx + dy * dy) ** .5 - - d = dd - self.rad - rounded = [(xp + d * dx / dd, yp + d * dy / dd), - (x_armB, y_armB)] - - if rounded: - xp, yp = rounded[-1] - dx, dy = x2 - xp, y2 - yp - dd = (dx * dx + dy * dy) ** .5 - - rounded.append((xp + self.rad * dx / dd, - yp + self.rad * dy / dd)) - vertices.extend(rounded) - codes.extend([Path.LINETO, - Path.CURVE3, - Path.CURVE3]) - - vertices.append((x2, y2)) - codes.append(Path.LINETO) - - return Path(vertices, codes) - - @_register_style(_style_list) - class Bar(_Base): - """ - A line with *angle* between A and B with *armA* and *armB*. One of the - arms is extended so that they are connected in a right angle. The - length of *armA* is determined by (*armA* + *fraction* x AB distance). - Same for *armB*. - """ - - def __init__(self, armA=0., armB=0., fraction=0.3, angle=None): - """ - Parameters - ---------- - armA : float - Minimum length of armA. - - armB : float - Minimum length of armB. - - fraction : float - A fraction of the distance between two points that will be - added to armA and armB. - - angle : float or None - Angle of the connecting line (if None, parallel to A and B). - """ - self.armA = armA - self.armB = armB - self.fraction = fraction - self.angle = angle - - def connect(self, posA, posB): - x1, y1 = posA - x20, y20 = x2, y2 = posB - - theta1 = math.atan2(y2 - y1, x2 - x1) - dx, dy = x2 - x1, y2 - y1 - dd = (dx * dx + dy * dy) ** .5 - ddx, ddy = dx / dd, dy / dd - - armA, armB = self.armA, self.armB - - if self.angle is not None: - theta0 = np.deg2rad(self.angle) - dtheta = theta1 - theta0 - dl = dd * math.sin(dtheta) - dL = dd * math.cos(dtheta) - x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0) - armB = armB - dl - - # update - dx, dy = x2 - x1, y2 - y1 - dd2 = (dx * dx + dy * dy) ** .5 - ddx, ddy = dx / dd2, dy / dd2 - - arm = max(armA, armB) - f = self.fraction * dd + arm - - cx1, cy1 = x1 + f * ddy, y1 - f * ddx - cx2, cy2 = x2 + f * ddy, y2 - f * ddx - - vertices = [(x1, y1), - (cx1, cy1), - (cx2, cy2), - (x20, y20)] - codes = [Path.MOVETO, - Path.LINETO, - Path.LINETO, - Path.LINETO] - - return Path(vertices, codes) - - -def _point_along_a_line(x0, y0, x1, y1, d): - """ - Return the point on the line connecting (*x0*, *y0*) -- (*x1*, *y1*) whose - distance from (*x0*, *y0*) is *d*. 
- """ - dx, dy = x0 - x1, y0 - y1 - ff = d / (dx * dx + dy * dy) ** .5 - x2, y2 = x0 - ff * dx, y0 - ff * dy - - return x2, y2 - - -@_docstring.dedent_interpd -class ArrowStyle(_Style): - """ - `ArrowStyle` is a container class which defines several - arrowstyle classes, which is used to create an arrow path along a - given path. These are mainly used with `FancyArrowPatch`. - - An arrowstyle object can be either created as:: - - ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4) - - or:: - - ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4) - - or:: - - ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4") - - The following classes are defined - - %(ArrowStyle:table)s - - An instance of any arrow style class is a callable object, - whose call signature is:: - - __call__(self, path, mutation_size, linewidth, aspect_ratio=1.) - - and it returns a tuple of a `.Path` instance and a boolean - value. *path* is a `.Path` instance along which the arrow - will be drawn. *mutation_size* and *aspect_ratio* have the same - meaning as in `BoxStyle`. *linewidth* is a line width to be - stroked. This is meant to be used to correct the location of the - head so that it does not overshoot the destination point, but not all - classes support it. - - Notes - ----- - *angleA* and *angleB* specify the orientation of the bracket, as either a - clockwise or counterclockwise angle depending on the arrow type. 0 degrees - means perpendicular to the line connecting the arrow's head and tail. - - .. plot:: gallery/text_labels_and_annotations/angles_on_bracket_arrows.py - """ - - _style_list = {} - - class _Base: - """ - Arrow Transmuter Base class - - ArrowTransmuterBase and its derivatives are used to make a fancy - arrow around a given path. The __call__ method returns a path - (which will be used to create a PathPatch instance) and a boolean - value indicating the path is open therefore is not fillable. This - class is not an artist and actual drawing of the fancy arrow is - done by the FancyArrowPatch class. - """ - - # The derived classes are required to be able to be initialized - # w/o arguments, i.e., all its argument (except self) must have - # the default values. - - @staticmethod - def ensure_quadratic_bezier(path): - """ - Some ArrowStyle classes only works with a simple quadratic - Bézier curve (created with `.ConnectionStyle.Arc3` or - `.ConnectionStyle.Angle3`). This static method checks if the - provided path is a simple quadratic Bézier curve and returns its - control points if true. - """ - segments = list(path.iter_segments()) - if (len(segments) != 2 or segments[0][1] != Path.MOVETO or - segments[1][1] != Path.CURVE3): - raise ValueError( - "'path' is not a valid quadratic Bezier curve") - return [*segments[0][0], *segments[1][0]] - - def transmute(self, path, mutation_size, linewidth): - """ - The transmute method is the very core of the ArrowStyle class and - must be overridden in the subclasses. It receives the *path* - object along which the arrow will be drawn, and the - *mutation_size*, with which the arrow head etc. will be scaled. - The *linewidth* may be used to adjust the path so that it does not - pass beyond the given points. It returns a tuple of a `.Path` - instance and a boolean. The boolean value indicate whether the - path can be filled or not. The return value can also be a list of - paths and list of booleans of the same length. 
- """ - raise NotImplementedError('Derived must override') - - def __call__(self, path, mutation_size, linewidth, - aspect_ratio=1.): - """ - The __call__ method is a thin wrapper around the transmute method - and takes care of the aspect ratio. - """ - - if aspect_ratio is not None: - # Squeeze the given height by the aspect_ratio - vertices = path.vertices / [1, aspect_ratio] - path_shrunk = Path(vertices, path.codes) - # call transmute method with squeezed height. - path_mutated, fillable = self.transmute(path_shrunk, - mutation_size, - linewidth) - if np.iterable(fillable): - # Restore the height - path_list = [Path(p.vertices * [1, aspect_ratio], p.codes) - for p in path_mutated] - return path_list, fillable - else: - return path_mutated, fillable - else: - return self.transmute(path, mutation_size, linewidth) - - class _Curve(_Base): - """ - A simple arrow which will work with any path instance. The - returned path is the concatenation of the original path, and at - most two paths representing the arrow head or bracket at the start - point and at the end point. The arrow heads can be either open - or closed. - """ - - arrow = "-" - fillbegin = fillend = False # Whether arrows are filled. - - def __init__(self, head_length=.4, head_width=.2, widthA=1., widthB=1., - lengthA=0.2, lengthB=0.2, angleA=0, angleB=0, scaleA=None, - scaleB=None): - """ - Parameters - ---------- - head_length : float, default: 0.4 - Length of the arrow head, relative to *mutation_size*. - head_width : float, default: 0.2 - Width of the arrow head, relative to *mutation_size*. - widthA, widthB : float, default: 1.0 - Width of the bracket. - lengthA, lengthB : float, default: 0.2 - Length of the bracket. - angleA, angleB : float, default: 0 - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. - scaleA, scaleB : float, default: *mutation_size* - The scale of the brackets. - """ - - self.head_length, self.head_width = head_length, head_width - self.widthA, self.widthB = widthA, widthB - self.lengthA, self.lengthB = lengthA, lengthB - self.angleA, self.angleB = angleA, angleB - self.scaleA, self.scaleB = scaleA, scaleB - - self._beginarrow_head = False - self._beginarrow_bracket = False - self._endarrow_head = False - self._endarrow_bracket = False - - if "-" not in self.arrow: - raise ValueError("arrow must have the '-' between " - "the two heads") - - beginarrow, endarrow = self.arrow.split("-", 1) - - if beginarrow == "<": - self._beginarrow_head = True - self._beginarrow_bracket = False - elif beginarrow == "<|": - self._beginarrow_head = True - self._beginarrow_bracket = False - self.fillbegin = True - elif beginarrow in ("]", "|"): - self._beginarrow_head = False - self._beginarrow_bracket = True - - if endarrow == ">": - self._endarrow_head = True - self._endarrow_bracket = False - elif endarrow == "|>": - self._endarrow_head = True - self._endarrow_bracket = False - self.fillend = True - elif endarrow in ("[", "|"): - self._endarrow_head = False - self._endarrow_bracket = True - - super().__init__() - - def _get_arrow_wedge(self, x0, y0, x1, y1, - head_dist, cos_t, sin_t, linewidth): - """ - Return the paths for arrow heads. Since arrow lines are - drawn with capstyle=projected, The arrow goes beyond the - desired point. This method also returns the amount of the path - to be shrunken so that it does not overshoot. 
- """ - - # arrow from x0, y0 to x1, y1 - dx, dy = x0 - x1, y0 - y1 - - cp_distance = np.hypot(dx, dy) - - # pad_projected : amount of pad to account the - # overshooting of the projection of the wedge - pad_projected = (.5 * linewidth / sin_t) - - # Account for division by zero - if cp_distance == 0: - cp_distance = 1 - - # apply pad for projected edge - ddx = pad_projected * dx / cp_distance - ddy = pad_projected * dy / cp_distance - - # offset for arrow wedge - dx = dx / cp_distance * head_dist - dy = dy / cp_distance * head_dist - - dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy - dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy - - vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1), - (x1 + ddx, y1 + ddy), - (x1 + ddx + dx2, y1 + ddy + dy2)] - codes_arrow = [Path.MOVETO, - Path.LINETO, - Path.LINETO] - - return vertices_arrow, codes_arrow, ddx, ddy - - def _get_bracket(self, x0, y0, - x1, y1, width, length, angle): - - cos_t, sin_t = get_cos_sin(x1, y1, x0, y0) - - # arrow from x0, y0 to x1, y1 - from matplotlib.bezier import get_normal_points - x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width) - - dx, dy = length * cos_t, length * sin_t - - vertices_arrow = [(x1 + dx, y1 + dy), - (x1, y1), - (x2, y2), - (x2 + dx, y2 + dy)] - codes_arrow = [Path.MOVETO, - Path.LINETO, - Path.LINETO, - Path.LINETO] - - if angle: - trans = transforms.Affine2D().rotate_deg_around(x0, y0, angle) - vertices_arrow = trans.transform(vertices_arrow) - - return vertices_arrow, codes_arrow - - def transmute(self, path, mutation_size, linewidth): - # docstring inherited - if self._beginarrow_head or self._endarrow_head: - head_length = self.head_length * mutation_size - head_width = self.head_width * mutation_size - head_dist = np.hypot(head_length, head_width) - cos_t, sin_t = head_length / head_dist, head_width / head_dist - - scaleA = mutation_size if self.scaleA is None else self.scaleA - scaleB = mutation_size if self.scaleB is None else self.scaleB - - # begin arrow - x0, y0 = path.vertices[0] - x1, y1 = path.vertices[1] - - # If there is no room for an arrow and a line, then skip the arrow - has_begin_arrow = self._beginarrow_head and (x0, y0) != (x1, y1) - verticesA, codesA, ddxA, ddyA = ( - self._get_arrow_wedge(x1, y1, x0, y0, - head_dist, cos_t, sin_t, linewidth) - if has_begin_arrow - else ([], [], 0, 0) - ) - - # end arrow - x2, y2 = path.vertices[-2] - x3, y3 = path.vertices[-1] - - # If there is no room for an arrow and a line, then skip the arrow - has_end_arrow = self._endarrow_head and (x2, y2) != (x3, y3) - verticesB, codesB, ddxB, ddyB = ( - self._get_arrow_wedge(x2, y2, x3, y3, - head_dist, cos_t, sin_t, linewidth) - if has_end_arrow - else ([], [], 0, 0) - ) - - # This simple code will not work if ddx, ddy is greater than the - # separation between vertices. 
- paths = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)], - path.vertices[1:-1], - [(x3 + ddxB, y3 + ddyB)]]), - path.codes)] - fills = [False] - - if has_begin_arrow: - if self.fillbegin: - paths.append( - Path([*verticesA, (0, 0)], [*codesA, Path.CLOSEPOLY])) - fills.append(True) - else: - paths.append(Path(verticesA, codesA)) - fills.append(False) - elif self._beginarrow_bracket: - x0, y0 = path.vertices[0] - x1, y1 = path.vertices[1] - verticesA, codesA = self._get_bracket(x0, y0, x1, y1, - self.widthA * scaleA, - self.lengthA * scaleA, - self.angleA) - - paths.append(Path(verticesA, codesA)) - fills.append(False) - - if has_end_arrow: - if self.fillend: - fills.append(True) - paths.append( - Path([*verticesB, (0, 0)], [*codesB, Path.CLOSEPOLY])) - else: - fills.append(False) - paths.append(Path(verticesB, codesB)) - elif self._endarrow_bracket: - x0, y0 = path.vertices[-1] - x1, y1 = path.vertices[-2] - verticesB, codesB = self._get_bracket(x0, y0, x1, y1, - self.widthB * scaleB, - self.lengthB * scaleB, - self.angleB) - - paths.append(Path(verticesB, codesB)) - fills.append(False) - - return paths, fills - - @_register_style(_style_list, name="-") - class Curve(_Curve): - """A simple curve without any arrow head.""" - - def __init__(self): # hide head_length, head_width - # These attributes (whose values come from backcompat) only matter - # if someone modifies beginarrow/etc. on an ArrowStyle instance. - super().__init__(head_length=.2, head_width=.1) - - @_register_style(_style_list, name="<-") - class CurveA(_Curve): - """An arrow with a head at its start point.""" - arrow = "<-" - - @_register_style(_style_list, name="->") - class CurveB(_Curve): - """An arrow with a head at its end point.""" - arrow = "->" - - @_register_style(_style_list, name="<->") - class CurveAB(_Curve): - """An arrow with heads both at the start and the end point.""" - arrow = "<->" - - @_register_style(_style_list, name="<|-") - class CurveFilledA(_Curve): - """An arrow with filled triangle head at the start.""" - arrow = "<|-" - - @_register_style(_style_list, name="-|>") - class CurveFilledB(_Curve): - """An arrow with filled triangle head at the end.""" - arrow = "-|>" - - @_register_style(_style_list, name="<|-|>") - class CurveFilledAB(_Curve): - """An arrow with filled triangle heads at both ends.""" - arrow = "<|-|>" - - @_register_style(_style_list, name="]-") - class BracketA(_Curve): - """An arrow with an outward square bracket at its start.""" - arrow = "]-" - - def __init__(self, widthA=1., lengthA=0.2, angleA=0): - """ - Parameters - ---------- - widthA : float, default: 1.0 - Width of the bracket. - lengthA : float, default: 0.2 - Length of the bracket. - angleA : float, default: 0 degrees - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. - """ - super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA) - - @_register_style(_style_list, name="-[") - class BracketB(_Curve): - """An arrow with an outward square bracket at its end.""" - arrow = "-[" - - def __init__(self, widthB=1., lengthB=0.2, angleB=0): - """ - Parameters - ---------- - widthB : float, default: 1.0 - Width of the bracket. - lengthB : float, default: 0.2 - Length of the bracket. - angleB : float, default: 0 degrees - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. 
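-
-            For example, an outward bracket at the end of the arrow could be
-            spelled (an illustrative sketch)::
-
-                ArrowStyle("-[", widthB=1.5, lengthB=0.3)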
- """ - super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB) - - @_register_style(_style_list, name="]-[") - class BracketAB(_Curve): - """An arrow with outward square brackets at both ends.""" - arrow = "]-[" - - def __init__(self, - widthA=1., lengthA=0.2, angleA=0, - widthB=1., lengthB=0.2, angleB=0): - """ - Parameters - ---------- - widthA, widthB : float, default: 1.0 - Width of the bracket. - lengthA, lengthB : float, default: 0.2 - Length of the bracket. - angleA, angleB : float, default: 0 degrees - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. - """ - super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA, - widthB=widthB, lengthB=lengthB, angleB=angleB) - - @_register_style(_style_list, name="|-|") - class BarAB(_Curve): - """An arrow with vertical bars ``|`` at both ends.""" - arrow = "|-|" - - def __init__(self, widthA=1., angleA=0, widthB=1., angleB=0): - """ - Parameters - ---------- - widthA, widthB : float, default: 1.0 - Width of the bracket. - angleA, angleB : float, default: 0 degrees - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. - """ - super().__init__(widthA=widthA, lengthA=0, angleA=angleA, - widthB=widthB, lengthB=0, angleB=angleB) - - @_register_style(_style_list, name=']->') - class BracketCurve(_Curve): - """ - An arrow with an outward square bracket at its start and a head at - the end. - """ - arrow = "]->" - - def __init__(self, widthA=1., lengthA=0.2, angleA=None): - """ - Parameters - ---------- - widthA : float, default: 1.0 - Width of the bracket. - lengthA : float, default: 0.2 - Length of the bracket. - angleA : float, default: 0 degrees - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. - """ - super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA) - - @_register_style(_style_list, name='<-[') - class CurveBracket(_Curve): - """ - An arrow with an outward square bracket at its end and a head at - the start. - """ - arrow = "<-[" - - def __init__(self, widthB=1., lengthB=0.2, angleB=None): - """ - Parameters - ---------- - widthB : float, default: 1.0 - Width of the bracket. - lengthB : float, default: 0.2 - Length of the bracket. - angleB : float, default: 0 degrees - Orientation of the bracket, as a counterclockwise angle. - 0 degrees means perpendicular to the line. - """ - super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB) - - @_register_style(_style_list) - class Simple(_Base): - """A simple arrow. Only works with a quadratic Bézier curve.""" - - def __init__(self, head_length=.5, head_width=.5, tail_width=.2): - """ - Parameters - ---------- - head_length : float, default: 0.5 - Length of the arrow head. - - head_width : float, default: 0.5 - Width of the arrow head. - - tail_width : float, default: 0.2 - Width of the arrow tail. 
- """ - self.head_length, self.head_width, self.tail_width = \ - head_length, head_width, tail_width - super().__init__() - - def transmute(self, path, mutation_size, linewidth): - # docstring inherited - x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path) - - # divide the path into a head and a tail - head_length = self.head_length * mutation_size - in_f = inside_circle(x2, y2, head_length) - arrow_path = [(x0, y0), (x1, y1), (x2, y2)] - - try: - arrow_out, arrow_in = \ - split_bezier_intersecting_with_closedpath(arrow_path, in_f) - except NonIntersectingPathException: - # if this happens, make a straight line of the head_length - # long. - x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length) - x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2) - arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)] - arrow_out = None - - # head - head_width = self.head_width * mutation_size - head_left, head_right = make_wedged_bezier2(arrow_in, - head_width / 2., wm=.5) - - # tail - if arrow_out is not None: - tail_width = self.tail_width * mutation_size - tail_left, tail_right = get_parallels(arrow_out, - tail_width / 2.) - - patch_path = [(Path.MOVETO, tail_right[0]), - (Path.CURVE3, tail_right[1]), - (Path.CURVE3, tail_right[2]), - (Path.LINETO, head_right[0]), - (Path.CURVE3, head_right[1]), - (Path.CURVE3, head_right[2]), - (Path.CURVE3, head_left[1]), - (Path.CURVE3, head_left[0]), - (Path.LINETO, tail_left[2]), - (Path.CURVE3, tail_left[1]), - (Path.CURVE3, tail_left[0]), - (Path.LINETO, tail_right[0]), - (Path.CLOSEPOLY, tail_right[0]), - ] - else: - patch_path = [(Path.MOVETO, head_right[0]), - (Path.CURVE3, head_right[1]), - (Path.CURVE3, head_right[2]), - (Path.CURVE3, head_left[1]), - (Path.CURVE3, head_left[0]), - (Path.CLOSEPOLY, head_left[0]), - ] - - path = Path([p for c, p in patch_path], [c for c, p in patch_path]) - - return path, True - - @_register_style(_style_list) - class Fancy(_Base): - """A fancy arrow. Only works with a quadratic Bézier curve.""" - - def __init__(self, head_length=.4, head_width=.4, tail_width=.4): - """ - Parameters - ---------- - head_length : float, default: 0.4 - Length of the arrow head. - - head_width : float, default: 0.4 - Width of the arrow head. - - tail_width : float, default: 0.4 - Width of the arrow tail. - """ - self.head_length, self.head_width, self.tail_width = \ - head_length, head_width, tail_width - super().__init__() - - def transmute(self, path, mutation_size, linewidth): - # docstring inherited - x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path) - - # divide the path into a head and a tail - head_length = self.head_length * mutation_size - arrow_path = [(x0, y0), (x1, y1), (x2, y2)] - - # path for head - in_f = inside_circle(x2, y2, head_length) - try: - path_out, path_in = split_bezier_intersecting_with_closedpath( - arrow_path, in_f) - except NonIntersectingPathException: - # if this happens, make a straight line of the head_length - # long. 
- x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length) - x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2) - arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)] - path_head = arrow_path - else: - path_head = path_in - - # path for head - in_f = inside_circle(x2, y2, head_length * .8) - path_out, path_in = split_bezier_intersecting_with_closedpath( - arrow_path, in_f) - path_tail = path_out - - # head - head_width = self.head_width * mutation_size - head_l, head_r = make_wedged_bezier2(path_head, - head_width / 2., - wm=.6) - - # tail - tail_width = self.tail_width * mutation_size - tail_left, tail_right = make_wedged_bezier2(path_tail, - tail_width * .5, - w1=1., wm=0.6, w2=0.3) - - # path for head - in_f = inside_circle(x0, y0, tail_width * .3) - path_in, path_out = split_bezier_intersecting_with_closedpath( - arrow_path, in_f) - tail_start = path_in[-1] - - head_right, head_left = head_r, head_l - patch_path = [(Path.MOVETO, tail_start), - (Path.LINETO, tail_right[0]), - (Path.CURVE3, tail_right[1]), - (Path.CURVE3, tail_right[2]), - (Path.LINETO, head_right[0]), - (Path.CURVE3, head_right[1]), - (Path.CURVE3, head_right[2]), - (Path.CURVE3, head_left[1]), - (Path.CURVE3, head_left[0]), - (Path.LINETO, tail_left[2]), - (Path.CURVE3, tail_left[1]), - (Path.CURVE3, tail_left[0]), - (Path.LINETO, tail_start), - (Path.CLOSEPOLY, tail_start), - ] - path = Path([p for c, p in patch_path], [c for c, p in patch_path]) - - return path, True - - @_register_style(_style_list) - class Wedge(_Base): - """ - Wedge(?) shape. Only works with a quadratic Bézier curve. The - start point has a width of the *tail_width* and the end point has a - width of 0. At the middle, the width is *shrink_factor*x*tail_width*. - """ - - def __init__(self, tail_width=.3, shrink_factor=0.5): - """ - Parameters - ---------- - tail_width : float, default: 0.3 - Width of the tail. - - shrink_factor : float, default: 0.5 - Fraction of the arrow width at the middle point. - """ - self.tail_width = tail_width - self.shrink_factor = shrink_factor - super().__init__() - - def transmute(self, path, mutation_size, linewidth): - # docstring inherited - x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path) - - arrow_path = [(x0, y0), (x1, y1), (x2, y2)] - b_plus, b_minus = make_wedged_bezier2( - arrow_path, - self.tail_width * mutation_size / 2., - wm=self.shrink_factor) - - patch_path = [(Path.MOVETO, b_plus[0]), - (Path.CURVE3, b_plus[1]), - (Path.CURVE3, b_plus[2]), - (Path.LINETO, b_minus[2]), - (Path.CURVE3, b_minus[1]), - (Path.CURVE3, b_minus[0]), - (Path.CLOSEPOLY, b_minus[0]), - ] - path = Path([p for c, p in patch_path], [c for c, p in patch_path]) - - return path, True - - -class FancyBboxPatch(Patch): - """ - A fancy box around a rectangle with lower left at *xy* = (*x*, *y*) - with specified width and height. - - `.FancyBboxPatch` is similar to `.Rectangle`, but it draws a fancy box - around the rectangle. The transformation of the rectangle box to the - fancy box is delegated to the style classes defined in `.BoxStyle`. - """ - - _edge_default = True - - def __str__(self): - s = self.__class__.__name__ + "((%g, %g), width=%g, height=%g)" - return s % (self._x, self._y, self._width, self._height) - - @_docstring.dedent_interpd - def __init__(self, xy, width, height, boxstyle="round", *, - mutation_scale=1, mutation_aspect=1, **kwargs): - """ - Parameters - ---------- - xy : float, float - The lower left corner of the box. - - width : float - The width of the box. - - height : float - The height of the box. 
- - boxstyle : str or `matplotlib.patches.BoxStyle` - The style of the fancy box. This can either be a `.BoxStyle` - instance or a string of the style name and optionally comma - separated attributes (e.g. "Round, pad=0.2"). This string is - passed to `.BoxStyle` to construct a `.BoxStyle` object. See - there for a full documentation. - - The following box styles are available: - - %(BoxStyle:table)s - - mutation_scale : float, default: 1 - Scaling factor applied to the attributes of the box style - (e.g. pad or rounding_size). - - mutation_aspect : float, default: 1 - The height of the rectangle will be squeezed by this value before - the mutation and the mutated box will be stretched by the inverse - of it. For example, this allows different horizontal and vertical - padding. - - Other Parameters - ---------------- - **kwargs : `.Patch` properties - - %(Patch:kwdoc)s - """ - - super().__init__(**kwargs) - self._x, self._y = xy - self._width = width - self._height = height - self.set_boxstyle(boxstyle) - self._mutation_scale = mutation_scale - self._mutation_aspect = mutation_aspect - self.stale = True - - @_docstring.dedent_interpd - def set_boxstyle(self, boxstyle=None, **kwargs): - """ - Set the box style, possibly with further attributes. - - Attributes from the previous box style are not reused. - - Without argument (or with ``boxstyle=None``), the available box styles - are returned as a human-readable string. - - Parameters - ---------- - boxstyle : str or `matplotlib.patches.BoxStyle` - The style of the box: either a `.BoxStyle` instance, or a string, - which is the style name and optionally comma separated attributes - (e.g. "Round,pad=0.2"). Such a string is used to construct a - `.BoxStyle` object, as documented in that class. - - The following box styles are available: - - %(BoxStyle:table_and_accepts)s - - **kwargs - Additional attributes for the box style. See the table above for - supported parameters. - - Examples - -------- - :: - - set_boxstyle("Round,pad=0.2") - set_boxstyle("round", pad=0.2) - """ - if boxstyle is None: - return BoxStyle.pprint_styles() - self._bbox_transmuter = ( - BoxStyle(boxstyle, **kwargs) - if isinstance(boxstyle, str) else boxstyle) - self.stale = True - - def get_boxstyle(self): - """Return the boxstyle object.""" - return self._bbox_transmuter - - def set_mutation_scale(self, scale): - """ - Set the mutation scale. - - Parameters - ---------- - scale : float - """ - self._mutation_scale = scale - self.stale = True - - def get_mutation_scale(self): - """Return the mutation scale.""" - return self._mutation_scale - - def set_mutation_aspect(self, aspect): - """ - Set the aspect ratio of the bbox mutation. - - Parameters - ---------- - aspect : float - """ - self._mutation_aspect = aspect - self.stale = True - - def get_mutation_aspect(self): - """Return the aspect ratio of the bbox mutation.""" - return (self._mutation_aspect if self._mutation_aspect is not None - else 1) # backcompat. - - def get_path(self): - """Return the mutated path of the rectangle.""" - boxstyle = self.get_boxstyle() - m_aspect = self.get_mutation_aspect() - # Call boxstyle with y, height squeezed by aspect_ratio. - path = boxstyle(self._x, self._y / m_aspect, - self._width, self._height / m_aspect, - self.get_mutation_scale()) - return Path(path.vertices * [1, m_aspect], path.codes) # Unsqueeze y. - - # Following methods are borrowed from the Rectangle class. 
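-    # They read and write the un-mutated rectangle, i.e. the box before the
-    # box style and mutation scale/aspect are applied in get_path().  For
-    # example (an illustrative sketch; ``ax`` is an existing Axes):
-    #
-    #     box = FancyBboxPatch((0.2, 0.4), 0.6, 0.2,
-    #                          boxstyle="round,pad=0.1")
-    #     ax.add_patch(box)
-    #     box.set_bounds(0.1, 0.1, 0.8, 0.3)   # move/resize like a Rectangle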
- - def get_x(self): - """Return the left coord of the rectangle.""" - return self._x - - def get_y(self): - """Return the bottom coord of the rectangle.""" - return self._y - - def get_width(self): - """Return the width of the rectangle.""" - return self._width - - def get_height(self): - """Return the height of the rectangle.""" - return self._height - - def set_x(self, x): - """ - Set the left coord of the rectangle. - - Parameters - ---------- - x : float - """ - self._x = x - self.stale = True - - def set_y(self, y): - """ - Set the bottom coord of the rectangle. - - Parameters - ---------- - y : float - """ - self._y = y - self.stale = True - - def set_width(self, w): - """ - Set the rectangle width. - - Parameters - ---------- - w : float - """ - self._width = w - self.stale = True - - def set_height(self, h): - """ - Set the rectangle height. - - Parameters - ---------- - h : float - """ - self._height = h - self.stale = True - - def set_bounds(self, *args): - """ - Set the bounds of the rectangle. - - Call signatures:: - - set_bounds(left, bottom, width, height) - set_bounds((left, bottom, width, height)) - - Parameters - ---------- - left, bottom : float - The coordinates of the bottom left corner of the rectangle. - width, height : float - The width/height of the rectangle. - """ - if len(args) == 1: - l, b, w, h = args[0] - else: - l, b, w, h = args - self._x = l - self._y = b - self._width = w - self._height = h - self.stale = True - - def get_bbox(self): - """Return the `.Bbox`.""" - return transforms.Bbox.from_bounds(self._x, self._y, - self._width, self._height) - - -class FancyArrowPatch(Patch): - """ - A fancy arrow patch. It draws an arrow using the `ArrowStyle`. - - The head and tail positions are fixed at the specified start and end points - of the arrow, but the size and shape (in display coordinates) of the arrow - does not change when the axis is moved or zoomed. - """ - _edge_default = True - - def __str__(self): - if self._posA_posB is not None: - (x1, y1), (x2, y2) = self._posA_posB - return f"{type(self).__name__}(({x1:g}, {y1:g})->({x2:g}, {y2:g}))" - else: - return f"{type(self).__name__}({self._path_original})" - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="path") - def __init__(self, posA=None, posB=None, path=None, - arrowstyle="simple", connectionstyle="arc3", - patchA=None, patchB=None, - shrinkA=2, shrinkB=2, - mutation_scale=1, mutation_aspect=1, - **kwargs): - """ - There are two ways for defining an arrow: - - - If *posA* and *posB* are given, a path connecting two points is - created according to *connectionstyle*. The path will be - clipped with *patchA* and *patchB* and further shrunken by - *shrinkA* and *shrinkB*. An arrow is drawn along this - resulting path using the *arrowstyle* parameter. - - - Alternatively if *path* is provided, an arrow is drawn along this - path and *patchA*, *patchB*, *shrinkA*, and *shrinkB* are ignored. - - Parameters - ---------- - posA, posB : (float, float), default: None - (x, y) coordinates of arrow tail and arrow head respectively. - - path : `~matplotlib.path.Path`, default: None - If provided, an arrow is drawn along this path and *patchA*, - *patchB*, *shrinkA*, and *shrinkB* are ignored. - - arrowstyle : str or `.ArrowStyle`, default: 'simple' - The `.ArrowStyle` with which the fancy arrow is drawn. If a - string, it should be one of the available arrowstyle names, with - optional comma-separated attributes. The optional attributes are - meant to be scaled with the *mutation_scale*. 
The following arrow - styles are available: - - %(ArrowStyle:table)s - - connectionstyle : str or `.ConnectionStyle` or None, optional, \ -default: 'arc3' - The `.ConnectionStyle` with which *posA* and *posB* are connected. - If a string, it should be one of the available connectionstyle - names, with optional comma-separated attributes. The following - connection styles are available: - - %(ConnectionStyle:table)s - - patchA, patchB : `.Patch`, default: None - Head and tail patches, respectively. - - shrinkA, shrinkB : float, default: 2 - Shrinking factor of the tail and head of the arrow respectively. - - mutation_scale : float, default: 1 - Value with which attributes of *arrowstyle* (e.g., *head_length*) - will be scaled. - - mutation_aspect : None or float, default: None - The height of the rectangle will be squeezed by this value before - the mutation and the mutated box will be stretched by the inverse - of it. - - Other Parameters - ---------------- - **kwargs : `.Patch` properties, optional - Here is a list of available `.Patch` properties: - - %(Patch:kwdoc)s - - In contrast to other patches, the default ``capstyle`` and - ``joinstyle`` for `FancyArrowPatch` are set to ``"round"``. - """ - # Traditionally, the cap- and joinstyle for FancyArrowPatch are round - kwargs.setdefault("joinstyle", JoinStyle.round) - kwargs.setdefault("capstyle", CapStyle.round) - - super().__init__(**kwargs) - - if posA is not None and posB is not None and path is None: - self._posA_posB = [posA, posB] - - if connectionstyle is None: - connectionstyle = "arc3" - self.set_connectionstyle(connectionstyle) - - elif posA is None and posB is None and path is not None: - self._posA_posB = None - else: - raise ValueError("Either posA and posB, or path need to provided") - - self.patchA = patchA - self.patchB = patchB - self.shrinkA = shrinkA - self.shrinkB = shrinkB - - self._path_original = path - - self.set_arrowstyle(arrowstyle) - - self._mutation_scale = mutation_scale - self._mutation_aspect = mutation_aspect - - self._dpi_cor = 1.0 - - def set_positions(self, posA, posB): - """ - Set the start and end positions of the connecting path. - - Parameters - ---------- - posA, posB : None, tuple - (x, y) coordinates of arrow tail and arrow head respectively. If - `None` use current value. - """ - if posA is not None: - self._posA_posB[0] = posA - if posB is not None: - self._posA_posB[1] = posB - self.stale = True - - def set_patchA(self, patchA): - """ - Set the tail patch. - - Parameters - ---------- - patchA : `.patches.Patch` - """ - self.patchA = patchA - self.stale = True - - def set_patchB(self, patchB): - """ - Set the head patch. - - Parameters - ---------- - patchB : `.patches.Patch` - """ - self.patchB = patchB - self.stale = True - - @_docstring.dedent_interpd - def set_connectionstyle(self, connectionstyle=None, **kwargs): - """ - Set the connection style, possibly with further attributes. - - Attributes from the previous connection style are not reused. - - Without argument (or with ``connectionstyle=None``), the available box - styles are returned as a human-readable string. - - Parameters - ---------- - connectionstyle : str or `matplotlib.patches.ConnectionStyle` - The style of the connection: either a `.ConnectionStyle` instance, - or a string, which is the style name and optionally comma separated - attributes (e.g. "Arc,armA=30,rad=10"). Such a string is used to - construct a `.ConnectionStyle` object, as documented in that class. 
- - The following connection styles are available: - - %(ConnectionStyle:table_and_accepts)s - - **kwargs - Additional attributes for the connection style. See the table above - for supported parameters. - - Examples - -------- - :: - - set_connectionstyle("Arc,armA=30,rad=10") - set_connectionstyle("arc", armA=30, rad=10) - """ - if connectionstyle is None: - return ConnectionStyle.pprint_styles() - self._connector = ( - ConnectionStyle(connectionstyle, **kwargs) - if isinstance(connectionstyle, str) else connectionstyle) - self.stale = True - - def get_connectionstyle(self): - """Return the `ConnectionStyle` used.""" - return self._connector - - def set_arrowstyle(self, arrowstyle=None, **kwargs): - """ - Set the arrow style, possibly with further attributes. - - Attributes from the previous arrow style are not reused. - - Without argument (or with ``arrowstyle=None``), the available box - styles are returned as a human-readable string. - - Parameters - ---------- - arrowstyle : str or `matplotlib.patches.ArrowStyle` - The style of the arrow: either a `.ArrowStyle` instance, or a - string, which is the style name and optionally comma separated - attributes (e.g. "Fancy,head_length=0.2"). Such a string is used to - construct a `.ArrowStyle` object, as documented in that class. - - The following arrow styles are available: - - %(ArrowStyle:table_and_accepts)s - - **kwargs - Additional attributes for the arrow style. See the table above for - supported parameters. - - Examples - -------- - :: - - set_arrowstyle("Fancy,head_length=0.2") - set_arrowstyle("fancy", head_length=0.2) - """ - if arrowstyle is None: - return ArrowStyle.pprint_styles() - self._arrow_transmuter = ( - ArrowStyle(arrowstyle, **kwargs) - if isinstance(arrowstyle, str) else arrowstyle) - self.stale = True - - def get_arrowstyle(self): - """Return the arrowstyle object.""" - return self._arrow_transmuter - - def set_mutation_scale(self, scale): - """ - Set the mutation scale. - - Parameters - ---------- - scale : float - """ - self._mutation_scale = scale - self.stale = True - - def get_mutation_scale(self): - """ - Return the mutation scale. - - Returns - ------- - scalar - """ - return self._mutation_scale - - def set_mutation_aspect(self, aspect): - """ - Set the aspect ratio of the bbox mutation. - - Parameters - ---------- - aspect : float - """ - self._mutation_aspect = aspect - self.stale = True - - def get_mutation_aspect(self): - """Return the aspect ratio of the bbox mutation.""" - return (self._mutation_aspect if self._mutation_aspect is not None - else 1) # backcompat. - - def get_path(self): - """Return the path of the arrow in the data coordinates.""" - # The path is generated in display coordinates, then converted back to - # data coordinates. 
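-        # Note: some arrow styles return several sub-paths; these are merged into one compound path below.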
- _path, fillable = self._get_path_in_displaycoord() - if np.iterable(fillable): - _path = Path.make_compound_path(*_path) - return self.get_transform().inverted().transform_path(_path) - - def _get_path_in_displaycoord(self): - """Return the mutated path of the arrow in display coordinates.""" - dpi_cor = self._dpi_cor - - if self._posA_posB is not None: - posA = self._convert_xy_units(self._posA_posB[0]) - posB = self._convert_xy_units(self._posA_posB[1]) - (posA, posB) = self.get_transform().transform((posA, posB)) - _path = self.get_connectionstyle()(posA, posB, - patchA=self.patchA, - patchB=self.patchB, - shrinkA=self.shrinkA * dpi_cor, - shrinkB=self.shrinkB * dpi_cor - ) - else: - _path = self.get_transform().transform_path(self._path_original) - - _path, fillable = self.get_arrowstyle()( - _path, - self.get_mutation_scale() * dpi_cor, - self.get_linewidth() * dpi_cor, - self.get_mutation_aspect()) - - return _path, fillable - - def draw(self, renderer): - if not self.get_visible(): - return - - # FIXME: dpi_cor is for the dpi-dependency of the linewidth. There - # could be room for improvement. Maybe _get_path_in_displaycoord could - # take a renderer argument, but get_path should be adapted too. - self._dpi_cor = renderer.points_to_pixels(1.) - path, fillable = self._get_path_in_displaycoord() - - if not np.iterable(fillable): - path = [path] - fillable = [fillable] - - affine = transforms.IdentityTransform() - - self._draw_paths_with_artist_properties( - renderer, - [(p, affine, self._facecolor if f and self._facecolor[3] else None) - for p, f in zip(path, fillable)]) - - -class ConnectionPatch(FancyArrowPatch): - """A patch that connects two points (possibly in different axes).""" - - def __str__(self): - return "ConnectionPatch((%g, %g), (%g, %g))" % \ - (self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1]) - - @_docstring.dedent_interpd - @_api.make_keyword_only("3.6", name="axesA") - def __init__(self, xyA, xyB, coordsA, coordsB=None, - axesA=None, axesB=None, - arrowstyle="-", - connectionstyle="arc3", - patchA=None, - patchB=None, - shrinkA=0., - shrinkB=0., - mutation_scale=10., - mutation_aspect=None, - clip_on=False, - **kwargs): - """ - Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*. - - Valid keys are - - =============== ====================================================== - Key Description - =============== ====================================================== - arrowstyle the arrow style - connectionstyle the connection style - relpos default is (0.5, 0.5) - patchA default is bounding box of the text - patchB default is None - shrinkA default is 2 points - shrinkB default is 2 points - mutation_scale default is text size (in points) - mutation_aspect default is 1. - ? any key for `matplotlib.patches.PathPatch` - =============== ====================================================== - - *coordsA* and *coordsB* are strings that indicate the - coordinates of *xyA* and *xyB*. 
- - ==================== ================================================== - Property Description - ==================== ================================================== - 'figure points' points from the lower left corner of the figure - 'figure pixels' pixels from the lower left corner of the figure - 'figure fraction' 0, 0 is lower left of figure and 1, 1 is upper - right - 'subfigure points' points from the lower left corner of the subfigure - 'subfigure pixels' pixels from the lower left corner of the subfigure - 'subfigure fraction' fraction of the subfigure, 0, 0 is lower left. - 'axes points' points from lower left corner of axes - 'axes pixels' pixels from lower left corner of axes - 'axes fraction' 0, 0 is lower left of axes and 1, 1 is upper right - 'data' use the coordinate system of the object being - annotated (default) - 'offset points' offset (in points) from the *xy* value - 'polar' you can specify *theta*, *r* for the annotation, - even in cartesian plots. Note that if you are - using a polar axes, you do not need to specify - polar for the coordinate system since that is the - native "data" coordinate system. - ==================== ================================================== - - Alternatively they can be set to any valid - `~matplotlib.transforms.Transform`. - - Note that 'subfigure pixels' and 'figure pixels' are the same - for the parent figure, so users who want code that is usable in - a subfigure can use 'subfigure pixels'. - - .. note:: - - Using `ConnectionPatch` across two `~.axes.Axes` instances - is not directly compatible with :doc:`constrained layout - `. Add the artist - directly to the `.Figure` instead of adding it to a specific Axes, - or exclude it from the layout using ``con.set_in_layout(False)``. - - .. code-block:: default - - fig, ax = plt.subplots(1, 2, constrained_layout=True) - con = ConnectionPatch(..., axesA=ax[0], axesB=ax[1]) - fig.add_artist(con) - - """ - if coordsB is None: - coordsB = coordsA - # we'll draw ourself after the artist we annotate by default - self.xy1 = xyA - self.xy2 = xyB - self.coords1 = coordsA - self.coords2 = coordsB - - self.axesA = axesA - self.axesB = axesB - - super().__init__(posA=(0, 0), posB=(1, 1), - arrowstyle=arrowstyle, - connectionstyle=connectionstyle, - patchA=patchA, patchB=patchB, - shrinkA=shrinkA, shrinkB=shrinkB, - mutation_scale=mutation_scale, - mutation_aspect=mutation_aspect, - clip_on=clip_on, - **kwargs) - # if True, draw annotation only if self.xy is inside the axes - self._annotation_clip = None - - def _get_xy(self, xy, s, axes=None): - """Calculate the pixel position of given point.""" - s0 = s # For the error message, if needed. 
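-        # Dispatch on *s*: coordinate-system strings ('data', 'polar', and the figure/subfigure/axes
-        # points, pixels, and fraction variants) are resolved to pixel positions below, while a
-        # matplotlib Transform instance is applied directly.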
- if axes is None: - axes = self.axes - xy = np.array(xy) - if s in ["figure points", "axes points"]: - xy *= self.figure.dpi / 72 - s = s.replace("points", "pixels") - elif s == "figure fraction": - s = self.figure.transFigure - elif s == "subfigure fraction": - s = self.figure.transSubfigure - elif s == "axes fraction": - s = axes.transAxes - x, y = xy - - if s == 'data': - trans = axes.transData - x = float(self.convert_xunits(x)) - y = float(self.convert_yunits(y)) - return trans.transform((x, y)) - elif s == 'offset points': - if self.xycoords == 'offset points': # prevent recursion - return self._get_xy(self.xy, 'data') - return ( - self._get_xy(self.xy, self.xycoords) # converted data point - + xy * self.figure.dpi / 72) # converted offset - elif s == 'polar': - theta, r = x, y - x = r * np.cos(theta) - y = r * np.sin(theta) - trans = axes.transData - return trans.transform((x, y)) - elif s == 'figure pixels': - # pixels from the lower left corner of the figure - bb = self.figure.figbbox - x = bb.x0 + x if x >= 0 else bb.x1 + x - y = bb.y0 + y if y >= 0 else bb.y1 + y - return x, y - elif s == 'subfigure pixels': - # pixels from the lower left corner of the figure - bb = self.figure.bbox - x = bb.x0 + x if x >= 0 else bb.x1 + x - y = bb.y0 + y if y >= 0 else bb.y1 + y - return x, y - elif s == 'axes pixels': - # pixels from the lower left corner of the axes - bb = axes.bbox - x = bb.x0 + x if x >= 0 else bb.x1 + x - y = bb.y0 + y if y >= 0 else bb.y1 + y - return x, y - elif isinstance(s, transforms.Transform): - return s.transform(xy) - else: - raise ValueError(f"{s0} is not a valid coordinate transformation") - - def set_annotation_clip(self, b): - """ - Set the annotation's clipping behavior. - - Parameters - ---------- - b : bool or None - - True: The annotation will be clipped when ``self.xy`` is - outside the axes. - - False: The annotation will always be drawn. - - None: The annotation will be clipped when ``self.xy`` is - outside the axes and ``self.xycoords == "data"``. - """ - self._annotation_clip = b - self.stale = True - - def get_annotation_clip(self): - """ - Return the clipping behavior. - - See `.set_annotation_clip` for the meaning of the return value. 
- """ - return self._annotation_clip - - def _get_path_in_displaycoord(self): - """Return the mutated path of the arrow in display coordinates.""" - dpi_cor = self._dpi_cor - posA = self._get_xy(self.xy1, self.coords1, self.axesA) - posB = self._get_xy(self.xy2, self.coords2, self.axesB) - path = self.get_connectionstyle()( - posA, posB, - patchA=self.patchA, patchB=self.patchB, - shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor, - ) - path, fillable = self.get_arrowstyle()( - path, - self.get_mutation_scale() * dpi_cor, - self.get_linewidth() * dpi_cor, - self.get_mutation_aspect() - ) - return path, fillable - - def _check_xy(self, renderer): - """Check whether the annotation needs to be drawn.""" - - b = self.get_annotation_clip() - - if b or (b is None and self.coords1 == "data"): - xy_pixel = self._get_xy(self.xy1, self.coords1, self.axesA) - if self.axesA is None: - axes = self.axes - else: - axes = self.axesA - if not axes.contains_point(xy_pixel): - return False - - if b or (b is None and self.coords2 == "data"): - xy_pixel = self._get_xy(self.xy2, self.coords2, self.axesB) - if self.axesB is None: - axes = self.axes - else: - axes = self.axesB - if not axes.contains_point(xy_pixel): - return False - - return True - - def draw(self, renderer): - if renderer is not None: - self._renderer = renderer - if not self.get_visible() or not self._check_xy(renderer): - return - super().draw(renderer) diff --git a/spaces/leogabraneth/text-generation-webui-main/css/chat_style-cai-chat.css b/spaces/leogabraneth/text-generation-webui-main/css/chat_style-cai-chat.css deleted file mode 100644 index 547082b519f65b0dc78a0b010faaada4566bffbd..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/css/chat_style-cai-chat.css +++ /dev/null @@ -1,66 +0,0 @@ -.message { - display: grid; - grid-template-columns: 60px minmax(0, 1fr); - padding-bottom: 25px; - font-size: 15px; - font-family: 'Noto Sans', Helvetica, Arial, sans-serif; - line-height: 22.5px !important; -} - -.message-body { - margin-top: 3px; -} - -.circle-you { - width: 50px; - height: 50px; - background-color: rgb(238 78 59); - border-radius: 50%; -} - -.circle-bot { - width: 50px; - height: 50px; - background-color: rgb(59 78 244); - border-radius: 50%; -} - -.circle-bot img, -.circle-you img { - border-radius: 50%; - width: 100%; - height: 100%; - object-fit: cover; -} - -.username { - font-weight: bold; -} - -.message-body img { - max-width: 300px; - max-height: 300px; - border-radius: 20px; -} - -.message-body p { - font-size: 15px !important; - line-height: 22.5px !important; -} - -.message-body p, .chat .message-body ul, .chat .message-body ol { - margin-bottom: 10px !important; -} - -.message-body p:last-child, .chat .message-body ul:last-child, .chat .message-body ol:last-child { - margin-bottom: 0 !important; -} - -.dark .message-body p em { - color: rgb(138 138 138) !important; -} - -.message-body p em { - color: rgb(110 110 110) !important; - font-weight: 500; -} \ No newline at end of file diff --git a/spaces/lexlms/README/README.md b/spaces/lexlms/README/README.md deleted file mode 100644 index ff78ba0aac5465e6ff3d94f00966992e0bb46731..0000000000000000000000000000000000000000 --- a/spaces/lexlms/README/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: README -emoji: 🚀 -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- - -Initiang from the recent work of Chalkidis, Garneau, et al., "LeXFiles and LegalLAMA: Facilitating English Multinational Legal Language 
Model Development", we release legal NLP resources to broaden legal NLP research, while also helping practioners who aim to build assistive legal NLP technologies. - -As of May 2023, we released: - -- LeXFiles (https://huggingface.co/datasets/lexlms/lex_files), a new diverse English legal corpus including 11 sub-corpora that cover legislation and case law from 6 primarily English-speaking legal systems (EU, CoE, Canada, US, UK, India). The corpus comprises approx. 6 million documents which sum up to approx. 19 billion tokens. -- LegalLAMA (https://huggingface.co/datasets/lexlms/legal_lama), a diverse probing benchmark suite comprising 8 sub-tasks that aims to assess the acquaintance of legal knowledge that PLMs acquired in pre-training. -- 2 new legal-oriented PLMs, dubbed LexLMs (https://huggingface.co/models?search=lexlms/legal-roberta), warm-started from the RoBERTa models, and further pre-trained on LeXFiles for 1M additional steps. diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Aquifer Test Pro Download Serial Key !EXCLUSIVE!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Aquifer Test Pro Download Serial Key !EXCLUSIVE!.md deleted file mode 100644 index 2b3dff109da1fa7069720526d0f37bd124e4027b..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Aquifer Test Pro Download Serial Key !EXCLUSIVE!.md +++ /dev/null @@ -1,6 +0,0 @@ -
-### aquifer test pro download serial key
-
-DOWNLOAD 🌟 https://bytlly.com/2uGyFT
-
-Schlumberger AquiferTest Pro 2016.1 x86+x64 - Free Download. AquiferTest Pro is an easy to use software to perform the graphical analysis and ... including keygen, .dll, and .exe files, which are usually detected as viruses by antivirus software. ... But when I go to run the software, there is no valid license available. 4d29de3e1b
-
      diff --git a/spaces/linfanluntan/Grounded-SAM/segment_anything/segment_anything/modeling/prompt_encoder.py b/spaces/linfanluntan/Grounded-SAM/segment_anything/segment_anything/modeling/prompt_encoder.py deleted file mode 100644 index c3143f4f8e02ddd7ca8587b40ff5d47c3a6b7ef3..0000000000000000000000000000000000000000 --- a/spaces/linfanluntan/Grounded-SAM/segment_anything/segment_anything/modeling/prompt_encoder.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -from torch import nn - -from typing import Any, Optional, Tuple, Type - -from .common import LayerNorm2d - - -class PromptEncoder(nn.Module): - def __init__( - self, - embed_dim: int, - image_embedding_size: Tuple[int, int], - input_image_size: Tuple[int, int], - mask_in_chans: int, - activation: Type[nn.Module] = nn.GELU, - ) -> None: - """ - Encodes prompts for input to SAM's mask decoder. - - Arguments: - embed_dim (int): The prompts' embedding dimension - image_embedding_size (tuple(int, int)): The spatial size of the - image embedding, as (H, W). - input_image_size (int): The padded size of the image as input - to the image encoder, as (H, W). - mask_in_chans (int): The number of hidden channels used for - encoding input masks. - activation (nn.Module): The activation to use when encoding - input masks. - """ - super().__init__() - self.embed_dim = embed_dim - self.input_image_size = input_image_size - self.image_embedding_size = image_embedding_size - self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) - - self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners - point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] - self.point_embeddings = nn.ModuleList(point_embeddings) - self.not_a_point_embed = nn.Embedding(1, embed_dim) - - self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) - self.mask_downscaling = nn.Sequential( - nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), - LayerNorm2d(mask_in_chans // 4), - activation(), - nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), - LayerNorm2d(mask_in_chans), - activation(), - nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), - ) - self.no_mask_embed = nn.Embedding(1, embed_dim) - - def get_dense_pe(self) -> torch.Tensor: - """ - Returns the positional encoding used to encode point prompts, - applied to a dense set of points the shape of the image encoding. 
- - Returns: - torch.Tensor: Positional encoding with shape - 1x(embed_dim)x(embedding_h)x(embedding_w) - """ - return self.pe_layer(self.image_embedding_size).unsqueeze(0) - - def _embed_points( - self, - points: torch.Tensor, - labels: torch.Tensor, - pad: bool, - ) -> torch.Tensor: - """Embeds point prompts.""" - points = points + 0.5 # Shift to center of pixel - if pad: - padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) - padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) - points = torch.cat([points, padding_point], dim=1) - labels = torch.cat([labels, padding_label], dim=1) - point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) - point_embedding[labels == -1] = 0.0 - point_embedding[labels == -1] += self.not_a_point_embed.weight - point_embedding[labels == 0] += self.point_embeddings[0].weight - point_embedding[labels == 1] += self.point_embeddings[1].weight - return point_embedding - - def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: - """Embeds box prompts.""" - boxes = boxes + 0.5 # Shift to center of pixel - coords = boxes.reshape(-1, 2, 2) - corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) - corner_embedding[:, 0, :] += self.point_embeddings[2].weight - corner_embedding[:, 1, :] += self.point_embeddings[3].weight - return corner_embedding - - def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: - """Embeds mask inputs.""" - mask_embedding = self.mask_downscaling(masks) - return mask_embedding - - def _get_batch_size( - self, - points: Optional[Tuple[torch.Tensor, torch.Tensor]], - boxes: Optional[torch.Tensor], - masks: Optional[torch.Tensor], - ) -> int: - """ - Gets the batch size of the output given the batch size of the input prompts. - """ - if points is not None: - return points[0].shape[0] - elif boxes is not None: - return boxes.shape[0] - elif masks is not None: - return masks.shape[0] - else: - return 1 - - def _get_device(self) -> torch.device: - return self.point_embeddings[0].weight.device - - def forward( - self, - points: Optional[Tuple[torch.Tensor, torch.Tensor]], - boxes: Optional[torch.Tensor], - masks: Optional[torch.Tensor], - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Embeds different types of prompts, returning both sparse and dense - embeddings. - - Arguments: - points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates - and labels to embed. - boxes (torch.Tensor or none): boxes to embed - masks (torch.Tensor or none): masks to embed - - Returns: - torch.Tensor: sparse embeddings for the points and boxes, with shape - BxNx(embed_dim), where N is determined by the number of input points - and boxes. 
-          torch.Tensor: dense embeddings for the masks, in the shape
-            Bx(embed_dim)x(embed_H)x(embed_W)
-        """
-        bs = self._get_batch_size(points, boxes, masks)
-        sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
-        if points is not None:
-            coords, labels = points
-            point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
-            sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
-        if boxes is not None:
-            box_embeddings = self._embed_boxes(boxes)
-            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
-
-        if masks is not None:
-            dense_embeddings = self._embed_masks(masks)
-        else:
-            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
-                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
-            )
-
-        return sparse_embeddings, dense_embeddings
-
-
-class PositionEmbeddingRandom(nn.Module):
-    """
-    Positional encoding using random spatial frequencies.
-    """
-
-    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
-        super().__init__()
-        if scale is None or scale <= 0.0:
-            scale = 1.0
-        self.register_buffer(
-            "positional_encoding_gaussian_matrix",
-            scale * torch.randn((2, num_pos_feats)),
-        )
-
-    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
-        """Positionally encode points that are normalized to [0,1]."""
-        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
-        coords = 2 * coords - 1
-        coords = coords @ self.positional_encoding_gaussian_matrix
-        coords = 2 * np.pi * coords
-        # outputs d_1 x ... x d_n x C shape
-        return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
-
-    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
-        """Generate positional encoding for a grid of the specified size."""
-        h, w = size
-        device: Any = self.positional_encoding_gaussian_matrix.device
-        grid = torch.ones((h, w), device=device, dtype=torch.float32)
-        y_embed = grid.cumsum(dim=0) - 0.5
-        x_embed = grid.cumsum(dim=1) - 0.5
-        y_embed = y_embed / h
-        x_embed = x_embed / w
-
-        pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
-        return pe.permute(2, 0, 1)  # C x H x W
-
-    def forward_with_coords(
-        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
-    ) -> torch.Tensor:
-        """Positionally encode points that are not normalized to [0,1]."""
-        coords = coords_input.clone()
-        coords[:, :, 0] = coords[:, :, 0] / image_size[1]
-        coords[:, :, 1] = coords[:, :, 1] / image_size[0]
-        return self._pe_encoding(coords.to(torch.float))  # B x N x C
diff --git a/spaces/lj1995/vocal2guitar/onnx_inference_demo.py b/spaces/lj1995/vocal2guitar/onnx_inference_demo.py
deleted file mode 100644
index 14e75d0eb4a5dc3542ce1ed6d462c70c7f4e5679..0000000000000000000000000000000000000000
--- a/spaces/lj1995/vocal2guitar/onnx_inference_demo.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import soundfile
-from infer_pack.onnx_inference import OnnxRVC
-
-hop_size = 512
-sampling_rate = 40000  # sampling rate
-f0_up_key = 0  # pitch shift (in semitones)
-sid = 0  # speaker ID
-f0_method = "dio"  # F0 extraction algorithm
-model_path = "ShirohaRVC.onnx"  # full path to the model
-vec_name = "vec-256-layer-9"  # expanded internally to f"pretrained/{vec_name}.onnx"; an ONNX vec model is required
-wav_path = "123.wav"  # input path or ByteIO instance
-out_path = "out.wav"  # output path or ByteIO instance
-
-model = OnnxRVC(
-    model_path, vec_path=vec_name, sr=sampling_rate, hop_size=hop_size, device="cuda"
-)
-
-audio = model.inference(wav_path, sid, f0_method=f0_method, f0_up_key=f0_up_key)
-
-soundfile.write(out_path, audio, sampling_rate)
diff --git
a/spaces/ltgoslo/ssa-perin/data/parser/from_mrp/labeled_edge_parser.py b/spaces/ltgoslo/ssa-perin/data/parser/from_mrp/labeled_edge_parser.py deleted file mode 100644 index 743b900c1dda0ddd95593dc3dffcf06139e71ccd..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/data/parser/from_mrp/labeled_edge_parser.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# coding=utf-8 - -from data.parser.from_mrp.abstract_parser import AbstractParser -import utility.parser_utils as utils - - -class LabeledEdgeParser(AbstractParser): - def __init__(self, args, part: str, fields, filter_pred=None, **kwargs): - assert part == "training" or part == "validation" - path = args.training_data if part == "training" else args.validation_data - - self.data = utils.load_dataset(path) - utils.anchor_ids_from_intervals(self.data) - - self.node_counter, self.edge_counter, self.no_edge_counter = 0, 0, 0 - anchor_count, n_node_token_pairs = 0, 0 - - for sentence_id, sentence in list(self.data.items()): - for edge in sentence["edges"]: - if "label" not in edge: - del self.data[sentence_id] - break - - for node, sentence in utils.node_generator(self.data): - node["label"] = "Node" - - self.node_counter += 1 - - utils.create_bert_tokens(self.data, args.encoder) - - # create edge vectors - for sentence in self.data.values(): - assert sentence["tops"] == [0], sentence - N = len(sentence["nodes"]) - - edge_count = utils.create_edges(sentence) - self.edge_counter += edge_count - self.no_edge_counter += N * (N - 1) - edge_count - - sentence["nodes"] = sentence["nodes"][1:] - N = len(sentence["nodes"]) - - sentence["anchor edges"] = [N, len(sentence["input"]), []] - sentence["source anchor edges"] = [N, len(sentence["input"]), []] # dummy - sentence["target anchor edges"] = [N, len(sentence["input"]), []] # dummy - sentence["anchored labels"] = [len(sentence["input"]), []] - for i, node in enumerate(sentence["nodes"]): - anchored_labels = [] - - for anchor in node["anchors"]: - sentence["anchor edges"][-1].append((i, anchor)) - anchored_labels.append((anchor, node["label"])) - - sentence["anchored labels"][1].append(anchored_labels) - - anchor_count += len(node["anchors"]) - n_node_token_pairs += len(sentence["input"]) - - sentence["id"] = [sentence["id"]] - - self.anchor_freq = anchor_count / n_node_token_pairs - self.source_anchor_freq = self.target_anchor_freq = 0.5 # dummy - self.input_count = sum(len(sentence["input"]) for sentence in self.data.values()) - - super(LabeledEdgeParser, self).__init__(fields, self.data, filter_pred) - - @staticmethod - def node_similarity_key(node): - return tuple([node["label"]] + node["anchors"]) diff --git a/spaces/ltgoslo/ssa-perin/mtool/README.md b/spaces/ltgoslo/ssa-perin/mtool/README.md deleted file mode 100644 index 6004464e4bd9a5e638755e95be74853c490de801..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/mtool/README.md +++ /dev/null @@ -1,268 +0,0 @@ -mtool -===== - - **The Swiss Army Knife of Meaning Representation** - -This repository provides software to support participants in the -shared tasks on [Meaning Representation Parsing (MRP)](http://mrp.nlpl.eu) -at the -[2019](http://www.conll.org/2019) and -[2020 Conference on Computational Natural Language Learning](http://www.conll.org/2020) (CoNLL). - -Please see the above task web site for additional background. - -Scoring -------- - -`mtool` implements the official MRP 2019 cross-framwork metric, as well as -a range of framework-specific graph similarity metrics, viz. 
- -+ MRP (Maximum Common Edge Subgraph Isomorphism); -+ EDM (Elementary Dependency Match; [Dridan & Oepen, 2011](http://aclweb.org/anthology/W/W11/W11-2927.pdf)); -+ SDP Labeled and Unlabeled Dependency F1 ([Oepen et al., 2015](http://aclweb.org/anthology/S/S14/S14-2008.pdf)); -+ SMATCH Precision, Recall, and F1 ([Cai & Knight, 2013](http://www.aclweb.org/anthology/P13-2131)); -+ UCCA Labeled and Unlabeled Dependency F1 ([Hershcovich et al., 2019](https://www.aclweb.org/anthology/S19-2001)). - -The ‘official’ cross-framework metric for the MRP 2019 shared task is a generalization -of the framework-specific metrics, considering all applicable ‘pieces of information’ (i.e. -tuples representing basic structural elements) for each framework: - -1. top nodes; -2. node labels; -3. node properties; -4. node anchoring; -5. directed edges; -6. edge labels; and -7. edge attributes. - -When comparing two graphs, node-to-node correspondences need to be established (via a -potentially approximative search) to maximize the aggregate, unweighted score of all of the tuple -types that apply for each specific framework. -Directed edges and edge labels, however, are always considered in conjunction during -this search. -``` -./main.py --read mrp --score mrp --gold data/sample/eds/wsj.mrp data/score/eds/wsj.pet.mrp -{"n": 87, - "tops": {"g": 87, "s": 87, "c": 85, "p": 0.9770114942528736, "r": 0.9770114942528736, "f": 0.9770114942528736}, - "labels": {"g": 2500, "s": 2508, "c": 2455, "p": 0.9788676236044657, "r": 0.982, "f": 0.9804313099041533}, - "properties": {"g": 262, "s": 261, "c": 257, "p": 0.9846743295019157, "r": 0.9809160305343512, "f": 0.982791586998088}, - "anchors": {"g": 2500, "s": 2508, "c": 2430, "p": 0.9688995215311005, "r": 0.972, "f": 0.9704472843450479}, - "edges": {"g": 2432, "s": 2439, "c": 2319, "p": 0.95079950799508, "r": 0.9535361842105263, "f": 0.952165879696161}, - "attributes": {"g": 0, "s": 0, "c": 0, "p": 0.0, "r": 0.0, "f": 0.0}, - "all": {"g": 7781, "s": 7803, "c": 7546, "p": 0.9670639497629117, "r": 0.9697982264490426, "f": 0.9684291581108829}} -``` -Albeit originally defined for one specific framework (EDS, DM and PSD, AMR, or UCCA, respectively), -the pre-MRP metrics are to some degree applicable to other frameworks too: the unified MRP representation -of semantic graphs enables such cross-framework application, in principle, but this functionality -remains largely untested (as of June 2019). - -The `Makefile` in the `data/score/` sub-directory shows some example calls for the MRP scorer. -As appropriate (e.g. 
for comparison to third-party results), it is possible to score graphs in
-each framework using its ‘own’ metric, for example (for AMR and UCCA, respectively):
-```
-./main.py --read mrp --score smatch --gold data/score/amr/test1.mrp data/score/amr/test2.mrp
-{"n": 3, "g": 30, "s": 29, "c": 24, "p": 0.8, "r": 0.8275862068965517, "f": 0.8135593220338982}
-```
-
-```
-./main.py --read mrp --score ucca --gold data/score/ucca/ewt.gold.mrp data/score/ucca/ewt.tupa.mrp
-{"n": 3757,
- "labeled":
-  {"primary": {"g": 63720, "s": 62876, "c": 38195,
-    "p": 0.6074654876264394, "r": 0.5994193345888261, "f": 0.6034155897500711},
-   "remote": {"g": 2673, "s": 1259, "c": 581,
-    "p": 0.4614773629864972, "r": 0.21735877291432848, "f": 0.2955239064089522}},
- "unlabeled":
-  {"primary": {"g": 56114, "s": 55761, "c": 52522,
-    "p": 0.9419128064417783, "r": 0.9359874541112735, "f": 0.938940782122905},
-   "remote": {"g": 2629, "s": 1248, "c": 595,
-    "p": 0.47676282051282054, "r": 0.22632179535945227, "f": 0.3069383543977302}}}
-```
-
-For all scorers, the `--trace` command-line option will enable per-item scores in the result
-(indexed by frameworks and graph identifiers).
-For MRP and SMATCH, the `--limit` option controls the maximum node pairing steps or
-hill-climbing iterations, respectively, to attempt during the search (with defaults `500000`
-and `20`, respectively).
-As of early July, 2019, the search for node-to-node correspondences in the MRP metric can be
-initialized from the result of the random-restart hill-climbing (RRHC) search from SMATCH.
-This initialization is on by default; it increases running time of the MRP scorer but yields
-a guarantee that the `"all"` counts of matching tuples in MRP will always be at least as
-high as the number of `"c"`(orrect) tuples identified by SMATCH.
-To control the two search steps in MRP computation separately, the `--limit` option can
-take a colon-separated pair of integers, for example `5:100000` for five hill-climbing
-iterations and up to 100,000 node pairing steps.
-Note that multi-valued use of the `--limit` option is only meaningful in conjunction
-with the MRP metric, and that setting either of the two values to `0` will disable the
-corresponding search component.
-Finally, the MRP scorer can parallelize evaluation: an option like `--cores 8` (on
-suitable hardware) will run eight `mtool` processes in parallel, which should reduce
-scoring time substantially.
-
-Analytics
----------
-
-[Kuhlmann & Oepen (2016)](http://www.mitpressjournals.org/doi/pdf/10.1162/COLI_a_00268) discuss a range of structural graph statistics; `mtool` integrates their original code, e.g.
-```
-./main.py --read mrp --analyze data/sample/amr/wsj.mrp
-(01) number of graphs 87
-(02) number of edge labels 52
-(03) \percentgraph\ trees 51.72
-(04) \percentgraph\ treewidth one 51.72
-(05) average treewidth 1.494
-(06) maximal treewidth 3
-(07) average edge density 1.050
-(08) \percentnode\ reentrant 4.24
-(09) \percentgraph\ cyclic 13.79
-(10) \percentgraph\ not connected 0.00
-(11) \percentgraph\ multi-rooted 0.00
-(12) percentage of non-top roots 0.00
-(13) average edge length --
-(14) \percentgraph\ noncrossing --
-(15) \percentgraph\ pagenumber two --
-```
-
-Validation
-----------
-
-`mtool` can test high-level well-formedness and (superficial) plausibility of MRP
-graphs through its emerging `--validate` option.
-The MRP validator continues to evolve, but the following is indicative of its
-functionality:
-```
-./main.py --read mrp --validate all data/validate/eds/wsj.mrp
-validate(): graph ‘20001001’: missing or invalid ‘input’ property
-validate(): graph ‘20001001’; node #0: missing or invalid label
-validate(): graph ‘20001001’; node #1: missing or invalid label
-validate(): graph ‘20001001’; node #3: missing or invalid anchoring
-validate(): graph ‘20001001’; node #6: invalid ‘anchors’ value: [{'from': 15, 'to': 23}, {'from': 15, 'to': 23}]
-validate(): graph ‘20001001’; node #7: invalid ‘anchors’ value: [{'form': 15, 'to': 17}]
-```
-
-Conversion
-----------
-
-Among its options for format conversion, `mtool` supports output of graphs to the
-[DOT language](https://www.graphviz.org/documentation/) for graph visualization, e.g.
-```
-./main.py --id 20001001 --read mrp --write dot data/sample/eds/wsj.mrp 20001001.dot
-dot -Tpdf 20001001.dot > 20001001.pdf
-```
-When converting from token-based file formats that may lack either the underlying
-‘raw’ input string, character-based anchoring, or both, the `--text` command-line
-option will enable recovery of inputs and attempt to determine anchoring.
-Its argument must be a file containing pairs of identifiers and input strings, one
-per line, separated by a tabulator, e.g.
-```
-./main.py --id 20012005 --text data/sample/wsj.txt --read dm --write dot data/sample/psd/wsj.sdp 20012005.dot
-```
-For increased readability, the `--ids` option will include MRP node identifiers
-in graph rendering, and the `--strings` option can replace character-based
-anchors with the corresponding sub-string from the `input` field of the graph
-(currently only for the DOT output format), e.g.
-```
-./main.py --n 1 --strings --read mrp --write dot data/sample/ucca/wsj.mrp vinken.dot
-```
-
-Diagnostics
------------
-
-When scoring with the MRP metric, `mtool` can optionally provide a per-item
-breakdown of differences between the gold and the system graphs, i.e. record
-false negatives (‘missing’ tuples) and false positives (‘surplus’ ones).
-This functionality is activated via the `--errors` command-line option, and
-tuple mismatches between the two graphs are recorded as a hierarchically
-nested JSON object, indexed (in order) by framework, item identifier, and tuple
-type.
-
-For example:
-```
-./main.py --read mrp --score mrp --framework eds --gold data/score/lpps.mrp --errors errors.json data/score/eds/lpps.peking.mrp
-```
-For the first EDS item (`#102990`) in this comparison, `errors.json` will
-contain a sub-structure like the following:
-```
-{"correspondences": [[0, 0], [1, 1], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
-  [11, 12], [12, 13], [13, 15], [14, 16], [15, 17], [16, 14], [17, 18], [18, 19], [19, 20]],
- "labels": {"missing": [[2, "_very+much_a_1"]],
-   "surplus": [[3, "_much_x_deg"], [2, "_very_x_deg"]]},
- "anchors": {"missing": [[2, [6, 7, 8, 9, 11, 12, 13, 14]]],
-   "surplus": [[2, [6, 7, 8, 9]], [3, [11, 12, 13, 14]]]},
- "edges": {"surplus": [[2, 3, "arg1"]]}}
-```
-When interpreting this structure, there are (of course) two separate spaces of
-node identifiers; the `correspondences` vector records the (optimal)
-node-to-node relation found by the MRP scorer, pairing identifiers from the
-*gold* graph with corresponding identifiers in the *system* graph.
-In the above, for example, gold node `#2` corresponds to system node `#3`,
-and there is a spurious node `#2` in the example system graph, which
-does not correspond to any of the gold nodes.
-Node identifiers in `"missing"` entries refer to gold nodes, whereas
-identifiers in `"surplus"` entries refer to the system graph, and they may
-or may not stand in a correspondence relation to a gold node.
-
-The differences between these two graphs can be visualized as follows, color-coding
-false negatives in red, and false positives in blue
-(and using gold identifiers, where available).
-
-![sample visualization](https://github.com/cfmrp/mtool/blob/master/data/score/eds/lpps.102990.png)
-
-Common Options
---------------
-
-The `--read` and `--write` command-line options determine the input and output
-codecs to use.
-Valid input arguments include `mrp`, `amr`, `ccd`, `dm`, `eds`, `pas`, `psd`, `ud`, `eud`,
-and `ucca`; note that some of these formats are only [partially supported](https://github.com/cfmrp/mtool/issues).
-The range of supported output codecs includes `mrp`, `dot`, or `txt`.
-
-The optional `--id`, `--i`, or `--n` options control which graph(s)
-from the input file(s) to process, selecting either by identifier, by (zero-based)
-position into the sequence of graphs read from the file, or using the first _n_
-graphs.
-These options cannot be combined with each other and take precedence over each
-other in the above order.
-
-Another way of selecting only a subset of graphs (from both the gold and
-system inputs) is the `--framework` option, which will limit the selection
-to graphs with matching `"framework"` values.
-Finally, the `--unique` option will discard graphs with multiple occurrences
-of the same identifier, keeping only the first occurrence from the input stream.
-
-Most top-level graph properties (`"id"`, `"time"`, `"source"`, `"provenance"`,
-`"language"`, `"flavor"`, `"framework"`, `"targets"`, `"input"`) can be set
-(or destructively overwritten, upon completion of input processing) using the
-`--inject` option, which takes as its argument a JSON object, e.g.
-``` -./main.py --text wsj.txt --read eds \ - --inject '{"source": "wsj", "provenance": "Redwoods Ninth Growth (ERG 1214)"}' \ - --write mrp wsj.eds wsj.mrp -``` - -Installation ------------- - -You can install `mtool` via `pip` with the following command: - -``` -pip install git+https://github.com/cfmrp/mtool.git#egg=mtool -``` - -Authors -------- - -+ Daniel Hershcovich (@danielhers) -+ Marco Kuhlmann (@khlmnn) -+ Stephan Oepen (@oepen) -+ Tim O'Gorman (@timjogorman) - -Contributors ------------- - -+ Yuta Koreeda (@koreyou) -+ Matthias Lindemann (@namednil) -+ Hiroaki Ozaki (@taryou) -+ Milan Straka (@foxik) - -[![Build Status (Travis CI)](https://travis-ci.org/cfmrp/mtool.svg?branch=master)](https://travis-ci.org/cfmrp/mtool) -[![Build Status (AppVeyor)](https://ci.appveyor.com/api/projects/status/github/cfmrp/mtool?svg=true)](https://ci.appveyor.com/project/danielh/mtool) diff --git a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/README.md b/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/README.md deleted file mode 100644 index 7caf773b12ab1d5595a76af0628fc0255c646b1f..0000000000000000000000000000000000000000 --- a/spaces/lychees/Stable-Diffusion-ControlNet-WebUI/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Stable Diffusion ControlNet WebUI -emoji: 🚀 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.19 -app_file: app.py -pinned: true -license: openrail -tags: -- making-demos -duplicated_from: ArtGAN/Stable-Diffusion-ControlNet-WebUI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/config/host_system.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/config/host_system.h deleted file mode 100644 index 5c13878032206d560e0ea5115b014af3841dc7b3..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/config/host_system.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -// reserve 0 for undefined -#define THRUST_HOST_SYSTEM_CPP 1 -#define THRUST_HOST_SYSTEM_OMP 2 -#define THRUST_HOST_SYSTEM_TBB 3 - -#ifndef THRUST_HOST_SYSTEM -#define THRUST_HOST_SYSTEM THRUST_HOST_SYSTEM_CPP -#endif // THRUST_HOST_SYSTEM - -// XXX make the use of THRUST_HOST_BACKEND an error in Thrust 1.7 -// XXX eliminate the following in Thrust 1.7 - -#define THRUST_HOST_BACKEND_CPP THRUST_HOST_SYSTEM_CPP -#define THRUST_HOST_BACKEND_OMP THRUST_HOST_SYSTEM_OMP -#define THRUST_HOST_BACKEND_TBB THRUST_HOST_SYSTEM_TBB - -#ifdef THRUST_HOST_BACKEND -# if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC -# pragma message("------------------------------------------------------------------------------") -# pragma message("| WARNING: THRUST_HOST_BACKEND is deprecated; use THRUST_HOST_SYSTEM instead |") -# pragma message("------------------------------------------------------------------------------") -# else -# warning ------------------------------------------------------------------------------ -# warning | WARNING: THRUST_HOST_BACKEND is deprecated; use THRUST_HOST_SYSTEM instead | -# warning ------------------------------------------------------------------------------ -# endif // THRUST_HOST_COMPILER -# undef THRUST_HOST_SYSTEM -# define THRUST_HOST_SYSTEM THRUST_HOST_BACKEND -#endif // THRUST_HOST_BACKEND - -#if THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_CPP -#define __THRUST_HOST_SYSTEM_NAMESPACE cpp -#elif THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_OMP -#define __THRUST_HOST_SYSTEM_NAMESPACE omp -#elif THRUST_HOST_SYSTEM == THRUST_HOST_SYSTEM_TBB -#define __THRUST_HOST_SYSTEM_NAMESPACE tbb -#endif - -#define __THRUST_HOST_SYSTEM_ROOT thrust/system/__THRUST_HOST_SYSTEM_NAMESPACE - diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/utils/img_process_util.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/utils/img_process_util.py deleted file mode 100644 index 52e02f09930dbf13bcd12bbe16b76e4fce52578e..0000000000000000000000000000000000000000 --- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/utils/img_process_util.py +++ /dev/null @@ -1,83 +0,0 @@ -import cv2 -import numpy as np -import torch -from torch.nn import functional as F - - -def filter2D(img, kernel): - """PyTorch version of cv2.filter2D - - Args: - img (Tensor): (b, c, h, w) - kernel (Tensor): (b, k, k) - """ - k = kernel.size(-1) - b, c, h, w = img.size() - if k % 2 == 1: - img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect') - else: - raise ValueError('Wrong kernel size') - - ph, pw = img.size()[-2:] - - if kernel.size(0) == 1: - # apply the same kernel to all batch images - img = img.view(b * c, 1, ph, pw) - kernel = kernel.view(1, 1, k, k) - return F.conv2d(img, kernel, padding=0).view(b, c, h, w) - else: - img = img.view(1, b * c, ph, pw) - kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k) - return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w) - - -def usm_sharp(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. - - Input image: I; Blurry image: B. - 1. sharp = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * sharp + (1 - Mask) * I - - - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - sharp = img + weight * residual - sharp = np.clip(sharp, 0, 1) - return soft_mask * sharp + (1 - soft_mask) * img - - -class USMSharp(torch.nn.Module): - - def __init__(self, radius=50, sigma=0): - super(USMSharp, self).__init__() - if radius % 2 == 0: - radius += 1 - self.radius = radius - kernel = cv2.getGaussianKernel(radius, sigma) - kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0) - self.register_buffer('kernel', kernel) - - def forward(self, img, weight=0.5, threshold=10): - blur = filter2D(img, self.kernel) - residual = img - blur - - mask = torch.abs(residual) * 255 > threshold - mask = mask.float() - soft_mask = filter2D(mask, self.kernel) - sharp = img + weight * residual - sharp = torch.clip(sharp, 0, 1) - return soft_mask * sharp + (1 - soft_mask) * img diff --git a/spaces/mattthew/SDXL-artists-browser/README.md b/spaces/mattthew/SDXL-artists-browser/README.md deleted file mode 100644 index b6969a3d02cc087a81e2170011b563b84ae51498..0000000000000000000000000000000000000000 --- a/spaces/mattthew/SDXL-artists-browser/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: SD Artists Browser -emoji: 🤘 -colorFrom: indigo -colorTo: purple -sdk: static -pinned: false -license: cc-by-sa-4.0 ---- -# Another SDXL artist browser?! - -Yes, because it's better: -### Most complete database of tags -* Each artist has at least 6 tags -* Mediums, styles, themes, periods, subject matter, and more -### Largest database of artists: ~~3000k~~ (temporarily 500 due to beta) -* Sort artists by random to discover new ones -* Browse image thumbnails and view at full resolution -* Three images each: 🎨 artwork, 🧑 portrait, and 🏞️ landscape -### Easiest to use filters -* Sort filters by name or count of matches -* Choose strict or permissive when combining filters -### Save your favorites -* Export or import your favorites too -### Community first -* Transparent: Open source Creatives Commons database -* Private: No cookies, nothing sent to server, use offline -* Note: I can't control what data Huggingface collects - -# How to support this project -* Please tell a friends or share on your socials -* Suggest artists I should add or remove -* Suggest features and report bugs \ No newline at end of file diff --git a/spaces/megatron7/bert-base-chinese/README.md b/spaces/megatron7/bert-base-chinese/README.md deleted file mode 100644 index 15bafa69e8e9bafdf6f2325048597c74a9fbc851..0000000000000000000000000000000000000000 --- a/spaces/megatron7/bert-base-chinese/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Bert Base Chinese -emoji: 🔥 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/meraih/English-Japanese-Anime-TTS/monotonic_align/setup.py b/spaces/meraih/English-Japanese-Anime-TTS/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/meraih/English-Japanese-Anime-TTS/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 
'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/transformer.py b/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/transformer.py deleted file mode 100644 index fcb8742dbdde6e80fd38b11d064211f6935aae76..0000000000000000000000000000000000000000 --- a/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/transformer.py +++ /dev/null @@ -1,959 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# DINO -# Copyright (c) 2022 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR Transformer class. -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ - -from typing import Optional - -import torch -import torch.utils.checkpoint as checkpoint -from torch import Tensor, nn - -from groundingdino.util.misc import inverse_sigmoid - -from .fuse_modules import BiAttentionBlock -from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn -from .transformer_vanilla import TransformerEncoderLayer -from .utils import ( - MLP, - _get_activation_fn, - _get_clones, - gen_encoder_output_proposals, - gen_sineembed_for_position, - get_sine_pos_embed, -) - - -class Transformer(nn.Module): - def __init__( - self, - d_model=256, - nhead=8, - num_queries=300, - num_encoder_layers=6, - num_unicoder_layers=0, - num_decoder_layers=6, - dim_feedforward=2048, - dropout=0.0, - activation="relu", - normalize_before=False, - return_intermediate_dec=False, - query_dim=4, - num_patterns=0, - # for deformable encoder - num_feature_levels=1, - enc_n_points=4, - dec_n_points=4, - # init query - learnable_tgt_init=False, - # two stage - two_stage_type="no", # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1'] - embed_init_tgt=False, - # for text - use_text_enhancer=False, - use_fusion_layer=False, - use_checkpoint=False, - use_transformer_ckpt=False, - use_text_cross_attention=False, - text_dropout=0.1, - fusion_dropout=0.1, - fusion_droppath=0.0, - ): - super().__init__() - self.num_feature_levels = num_feature_levels - self.num_encoder_layers = num_encoder_layers - self.num_unicoder_layers = num_unicoder_layers - self.num_decoder_layers = num_decoder_layers - self.num_queries = num_queries - assert query_dim == 4 - - # choose encoder layer type - encoder_layer = DeformableTransformerEncoderLayer( - d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points - ) - - if use_text_enhancer: - text_enhance_layer = TransformerEncoderLayer( - d_model=d_model, - nhead=nhead // 2, - dim_feedforward=dim_feedforward // 2, - dropout=text_dropout, - ) - else: - text_enhance_layer = None - - if use_fusion_layer: - 
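-            # BiAttentionBlock fuses image and text features via bi-directional cross-attention.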
feature_fusion_layer = BiAttentionBlock( - v_dim=d_model, - l_dim=d_model, - embed_dim=dim_feedforward // 2, - num_heads=nhead // 2, - dropout=fusion_dropout, - drop_path=fusion_droppath, - ) - else: - feature_fusion_layer = None - - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - assert encoder_norm is None - self.encoder = TransformerEncoder( - encoder_layer, - num_encoder_layers, - d_model=d_model, - num_queries=num_queries, - text_enhance_layer=text_enhance_layer, - feature_fusion_layer=feature_fusion_layer, - use_checkpoint=use_checkpoint, - use_transformer_ckpt=use_transformer_ckpt, - ) - - # choose decoder layer type - decoder_layer = DeformableTransformerDecoderLayer( - d_model, - dim_feedforward, - dropout, - activation, - num_feature_levels, - nhead, - dec_n_points, - use_text_cross_attention=use_text_cross_attention, - ) - - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder( - decoder_layer, - num_decoder_layers, - decoder_norm, - return_intermediate=return_intermediate_dec, - d_model=d_model, - query_dim=query_dim, - num_feature_levels=num_feature_levels, - ) - - self.d_model = d_model - self.nhead = nhead - self.dec_layers = num_decoder_layers - self.num_queries = num_queries # useful for single stage model only - self.num_patterns = num_patterns - if not isinstance(num_patterns, int): - Warning("num_patterns should be int but {}".format(type(num_patterns))) - self.num_patterns = 0 - - if num_feature_levels > 1: - if self.num_encoder_layers > 0: - self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) - else: - self.level_embed = None - - self.learnable_tgt_init = learnable_tgt_init - assert learnable_tgt_init, "why not learnable_tgt_init" - self.embed_init_tgt = embed_init_tgt - if (two_stage_type != "no" and embed_init_tgt) or (two_stage_type == "no"): - self.tgt_embed = nn.Embedding(self.num_queries, d_model) - nn.init.normal_(self.tgt_embed.weight.data) - else: - self.tgt_embed = None - - # for two stage - self.two_stage_type = two_stage_type - assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( - two_stage_type - ) - if two_stage_type == "standard": - # anchor selection at the output of encoder - self.enc_output = nn.Linear(d_model, d_model) - self.enc_output_norm = nn.LayerNorm(d_model) - self.two_stage_wh_embedding = None - - if two_stage_type == "no": - self.init_ref_points(num_queries) # init self.refpoint_embed - - self.enc_out_class_embed = None - self.enc_out_bbox_embed = None - - self._reset_parameters() - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - for m in self.modules(): - if isinstance(m, MSDeformAttn): - m._reset_parameters() - if self.num_feature_levels > 1 and self.level_embed is not None: - nn.init.normal_(self.level_embed) - - def get_valid_ratio(self, mask): - _, H, W = mask.shape - valid_H = torch.sum(~mask[:, :, 0], 1) - valid_W = torch.sum(~mask[:, 0, :], 1) - valid_ratio_h = valid_H.float() / H - valid_ratio_w = valid_W.float() / W - valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) - return valid_ratio - - def init_ref_points(self, use_num_queries): - self.refpoint_embed = nn.Embedding(use_num_queries, 4) - - def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None, text_dict=None): - """ - Input: - - srcs: List of multi features [bs, ci, hi, wi] - - masks: List of multi masks [bs, hi, wi] - - refpoint_embed: [bs, num_dn, 4]. 
None in infer - - pos_embeds: List of multi pos embeds [bs, ci, hi, wi] - - tgt: [bs, num_dn, d_model]. None in infer - - """ - # prepare input for encoder - src_flatten = [] - mask_flatten = [] - lvl_pos_embed_flatten = [] - spatial_shapes = [] - for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)): - bs, c, h, w = src.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - - src = src.flatten(2).transpose(1, 2) # bs, hw, c - mask = mask.flatten(1) # bs, hw - pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c - if self.num_feature_levels > 1 and self.level_embed is not None: - lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1) - else: - lvl_pos_embed = pos_embed - lvl_pos_embed_flatten.append(lvl_pos_embed) - src_flatten.append(src) - mask_flatten.append(mask) - src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c - mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw} - lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=src_flatten.device - ) - level_start_index = torch.cat( - (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]) - ) - valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) - - # two stage - enc_topk_proposals = enc_refpoint_embed = None - - ######################################################### - # Begin Encoder - ######################################################### - memory, memory_text = self.encoder( - src_flatten, - pos=lvl_pos_embed_flatten, - level_start_index=level_start_index, - spatial_shapes=spatial_shapes, - valid_ratios=valid_ratios, - key_padding_mask=mask_flatten, - memory_text=text_dict["encoded_text"], - text_attention_mask=~text_dict["text_token_mask"], - # we ~ the mask . 
False means use the token; True means pad the token - position_ids=text_dict["position_ids"], - text_self_attention_masks=text_dict["text_self_attention_masks"], - ) - ######################################################### - # End Encoder - # - memory: bs, \sum{hw}, c - # - mask_flatten: bs, \sum{hw} - # - lvl_pos_embed_flatten: bs, \sum{hw}, c - # - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c) - # - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c) - ######################################################### - text_dict["encoded_text"] = memory_text - # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1': - # if memory.isnan().any() | memory.isinf().any(): - # import ipdb; ipdb.set_trace() - - if self.two_stage_type == "standard": - output_memory, output_proposals = gen_encoder_output_proposals( - memory, mask_flatten, spatial_shapes - ) - output_memory = self.enc_output_norm(self.enc_output(output_memory)) - - if text_dict is not None: - enc_outputs_class_unselected = self.enc_out_class_embed(output_memory, text_dict) - else: - enc_outputs_class_unselected = self.enc_out_class_embed(output_memory) - - topk_logits = enc_outputs_class_unselected.max(-1)[0] - enc_outputs_coord_unselected = ( - self.enc_out_bbox_embed(output_memory) + output_proposals - ) # (bs, \sum{hw}, 4) unsigmoid - topk = self.num_queries - - topk_proposals = torch.topk(topk_logits, topk, dim=1)[1] # bs, nq - - # gather boxes - refpoint_embed_undetach = torch.gather( - enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) - ) # unsigmoid - refpoint_embed_ = refpoint_embed_undetach.detach() - init_box_proposal = torch.gather( - output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) - ).sigmoid() # sigmoid - - # gather tgt - tgt_undetach = torch.gather( - output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model) - ) - if self.embed_init_tgt: - tgt_ = ( - self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) - ) # nq, bs, d_model - else: - tgt_ = tgt_undetach.detach() - - if refpoint_embed is not None: - refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1) - tgt = torch.cat([tgt, tgt_], dim=1) - else: - refpoint_embed, tgt = refpoint_embed_, tgt_ - - elif self.two_stage_type == "no": - tgt_ = ( - self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) - ) # nq, bs, d_model - refpoint_embed_ = ( - self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) - ) # nq, bs, 4 - - if refpoint_embed is not None: - refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1) - tgt = torch.cat([tgt, tgt_], dim=1) - else: - refpoint_embed, tgt = refpoint_embed_, tgt_ - - if self.num_patterns > 0: - tgt_embed = tgt.repeat(1, self.num_patterns, 1) - refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1) - tgt_pat = self.patterns.weight[None, :, :].repeat_interleave( - self.num_queries, 1 - ) # 1, n_q*n_pat, d_model - tgt = tgt_embed + tgt_pat - - init_box_proposal = refpoint_embed_.sigmoid() - - else: - raise NotImplementedError("unknown two_stage_type {}".format(self.two_stage_type)) - ######################################################### - # End preparing tgt - # - tgt: bs, NQ, d_model - # - refpoint_embed(unsigmoid): bs, NQ, d_model - ######################################################### - - ######################################################### - # Begin Decoder - ######################################################### - hs, references = 
self.decoder( - tgt=tgt.transpose(0, 1), - memory=memory.transpose(0, 1), - memory_key_padding_mask=mask_flatten, - pos=lvl_pos_embed_flatten.transpose(0, 1), - refpoints_unsigmoid=refpoint_embed.transpose(0, 1), - level_start_index=level_start_index, - spatial_shapes=spatial_shapes, - valid_ratios=valid_ratios, - tgt_mask=attn_mask, - memory_text=text_dict["encoded_text"], - text_attention_mask=~text_dict["text_token_mask"], - # we ~ the mask . False means use the token; True means pad the token - ) - ######################################################### - # End Decoder - # hs: n_dec, bs, nq, d_model - # references: n_dec+1, bs, nq, query_dim - ######################################################### - - ######################################################### - # Begin postprocess - ######################################################### - if self.two_stage_type == "standard": - hs_enc = tgt_undetach.unsqueeze(0) - ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0) - else: - hs_enc = ref_enc = None - ######################################################### - # End postprocess - # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None - # ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None - ######################################################### - - return hs, references, hs_enc, ref_enc, init_box_proposal - # hs: (n_dec, bs, nq, d_model) - # references: sigmoid coordinates. (n_dec+1, bs, bq, 4) - # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None - # ref_enc: sigmoid coordinates. \ - # (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None - - -class TransformerEncoder(nn.Module): - def __init__( - self, - encoder_layer, - num_layers, - d_model=256, - num_queries=300, - enc_layer_share=False, - text_enhance_layer=None, - feature_fusion_layer=None, - use_checkpoint=False, - use_transformer_ckpt=False, - ): - """_summary_ - - Args: - encoder_layer (_type_): _description_ - num_layers (_type_): _description_ - norm (_type_, optional): _description_. Defaults to None. - d_model (int, optional): _description_. Defaults to 256. - num_queries (int, optional): _description_. Defaults to 300. - enc_layer_share (bool, optional): _description_. Defaults to False. 
- - """ - super().__init__() - # prepare layers - self.layers = [] - self.text_layers = [] - self.fusion_layers = [] - if num_layers > 0: - self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share) - - if text_enhance_layer is not None: - self.text_layers = _get_clones( - text_enhance_layer, num_layers, layer_share=enc_layer_share - ) - if feature_fusion_layer is not None: - self.fusion_layers = _get_clones( - feature_fusion_layer, num_layers, layer_share=enc_layer_share - ) - else: - self.layers = [] - del encoder_layer - - if text_enhance_layer is not None: - self.text_layers = [] - del text_enhance_layer - if feature_fusion_layer is not None: - self.fusion_layers = [] - del feature_fusion_layer - - self.query_scale = None - self.num_queries = num_queries - self.num_layers = num_layers - self.d_model = d_model - - self.use_checkpoint = use_checkpoint - self.use_transformer_ckpt = use_transformer_ckpt - - @staticmethod - def get_reference_points(spatial_shapes, valid_ratios, device): - reference_points_list = [] - for lvl, (H_, W_) in enumerate(spatial_shapes): - - ref_y, ref_x = torch.meshgrid( - torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device), - torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device), - ) - ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_) - ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_) - ref = torch.stack((ref_x, ref_y), -1) - reference_points_list.append(ref) - reference_points = torch.cat(reference_points_list, 1) - reference_points = reference_points[:, :, None] * valid_ratios[:, None] - return reference_points - - def forward( - self, - # for images - src: Tensor, - pos: Tensor, - spatial_shapes: Tensor, - level_start_index: Tensor, - valid_ratios: Tensor, - key_padding_mask: Tensor, - # for texts - memory_text: Tensor = None, - text_attention_mask: Tensor = None, - pos_text: Tensor = None, - text_self_attention_masks: Tensor = None, - position_ids: Tensor = None, - ): - """ - Input: - - src: [bs, sum(hi*wi), 256] - - pos: pos embed for src. [bs, sum(hi*wi), 256] - - spatial_shapes: h,w of each level [num_level, 2] - - level_start_index: [num_level] start point of level in sum(hi*wi). 
- - valid_ratios: [bs, num_level, 2] - - key_padding_mask: [bs, sum(hi*wi)] - - - memory_text: bs, n_text, 256 - - text_attention_mask: bs, n_text - False for no padding; True for padding - - pos_text: bs, n_text, 256 - - - position_ids: bs, n_text - Intermediate: - - reference_points: [bs, sum(hi*wi), num_level, 2] - Outputs: - - output: [bs, sum(hi*wi), 256] - """ - - output = src - - # preparation and reshape - if self.num_layers > 0: - reference_points = self.get_reference_points( - spatial_shapes, valid_ratios, device=src.device - ) - - if self.text_layers: - # generate pos_text - bs, n_text, text_dim = memory_text.shape - if pos_text is None and position_ids is None: - pos_text = ( - torch.arange(n_text, device=memory_text.device) - .float() - .unsqueeze(0) - .unsqueeze(-1) - .repeat(bs, 1, 1) - ) - pos_text = get_sine_pos_embed(pos_text, num_pos_feats=256, exchange_xy=False) - if position_ids is not None: - pos_text = get_sine_pos_embed( - position_ids[..., None], num_pos_feats=256, exchange_xy=False - ) - - # main process - for layer_id, layer in enumerate(self.layers): - # if output.isnan().any() or memory_text.isnan().any(): - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - if self.fusion_layers: - if self.use_checkpoint: - output, memory_text = checkpoint.checkpoint( - self.fusion_layers[layer_id], - output, - memory_text, - key_padding_mask, - text_attention_mask, - ) - else: - output, memory_text = self.fusion_layers[layer_id]( - v=output, - l=memory_text, - attention_mask_v=key_padding_mask, - attention_mask_l=text_attention_mask, - ) - - if self.text_layers: - memory_text = self.text_layers[layer_id]( - src=memory_text.transpose(0, 1), - src_mask=~text_self_attention_masks, # note we use ~ for mask here - src_key_padding_mask=text_attention_mask, - pos=(pos_text.transpose(0, 1) if pos_text is not None else None), - ).transpose(0, 1) - - # main process - if self.use_transformer_ckpt: - output = checkpoint.checkpoint( - layer, - output, - pos, - reference_points, - spatial_shapes, - level_start_index, - key_padding_mask, - ) - else: - output = layer( - src=output, - pos=pos, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - key_padding_mask=key_padding_mask, - ) - - return output, memory_text - - -class TransformerDecoder(nn.Module): - def __init__( - self, - decoder_layer, - num_layers, - norm=None, - return_intermediate=False, - d_model=256, - query_dim=4, - num_feature_levels=1, - ): - super().__init__() - if num_layers > 0: - self.layers = _get_clones(decoder_layer, num_layers) - else: - self.layers = [] - self.num_layers = num_layers - self.norm = norm - self.return_intermediate = return_intermediate - assert return_intermediate, "support return_intermediate only" - self.query_dim = query_dim - assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim) - self.num_feature_levels = num_feature_levels - - self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2) - self.query_pos_sine_scale = None - - self.query_scale = None - self.bbox_embed = None - self.class_embed = None - - self.d_model = d_model - - self.ref_anchor_head = None - - def forward( - self, - tgt, - memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - refpoints_unsigmoid: Optional[Tensor] = None, #
num_queries, bs, 2 - # for memory - level_start_index: Optional[Tensor] = None, # num_levels - spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2 - valid_ratios: Optional[Tensor] = None, - # for text - memory_text: Optional[Tensor] = None, - text_attention_mask: Optional[Tensor] = None, - ): - """ - Input: - - tgt: nq, bs, d_model - - memory: hw, bs, d_model - - pos: hw, bs, d_model - - refpoints_unsigmoid: nq, bs, 2/4 - - valid_ratios/spatial_shapes: bs, nlevel, 2 - """ - output = tgt - - intermediate = [] - reference_points = refpoints_unsigmoid.sigmoid() - ref_points = [reference_points] - - for layer_id, layer in enumerate(self.layers): - - if reference_points.shape[-1] == 4: - reference_points_input = ( - reference_points[:, :, None] - * torch.cat([valid_ratios, valid_ratios], -1)[None, :] - ) # nq, bs, nlevel, 4 - else: - assert reference_points.shape[-1] == 2 - reference_points_input = reference_points[:, :, None] * valid_ratios[None, :] - query_sine_embed = gen_sineembed_for_position( - reference_points_input[:, :, 0, :] - ) # nq, bs, 256*2 - - # conditional query - raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256 - pos_scale = self.query_scale(output) if self.query_scale is not None else 1 - query_pos = pos_scale * raw_query_pos - # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1': - # if query_pos.isnan().any() | query_pos.isinf().any(): - # import ipdb; ipdb.set_trace() - - # main process - output = layer( - tgt=output, - tgt_query_pos=query_pos, - tgt_query_sine_embed=query_sine_embed, - tgt_key_padding_mask=tgt_key_padding_mask, - tgt_reference_points=reference_points_input, - memory_text=memory_text, - text_attention_mask=text_attention_mask, - memory=memory, - memory_key_padding_mask=memory_key_padding_mask, - memory_level_start_index=level_start_index, - memory_spatial_shapes=spatial_shapes, - memory_pos=pos, - self_attn_mask=tgt_mask, - cross_attn_mask=memory_mask, - ) - if output.isnan().any() | output.isinf().any(): - print(f"output layer_id {layer_id} is nan") - try: - num_nan = output.isnan().sum().item() - num_inf = output.isinf().sum().item() - print(f"num_nan {num_nan}, num_inf {num_inf}") - except Exception as e: - print(e) - # if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1': - # import ipdb; ipdb.set_trace() - - # iter update - if self.bbox_embed is not None: - # box_holder = self.bbox_embed(output) - # box_holder[..., :self.query_dim] += inverse_sigmoid(reference_points) - # new_reference_points = box_holder[..., :self.query_dim].sigmoid() - - reference_before_sigmoid = inverse_sigmoid(reference_points) - delta_unsig = self.bbox_embed[layer_id](output) - outputs_unsig = delta_unsig + reference_before_sigmoid - new_reference_points = outputs_unsig.sigmoid() - - reference_points = new_reference_points.detach() - # if layer_id != self.num_layers - 1: - ref_points.append(new_reference_points) - - intermediate.append(self.norm(output)) - - return [ - [itm_out.transpose(0, 1) for itm_out in intermediate], - [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points], - ] - - -class DeformableTransformerEncoderLayer(nn.Module): - def __init__( - self, - d_model=256, - d_ffn=1024, - dropout=0.1, - activation="relu", - n_levels=4, - n_heads=8, - n_points=4, - ): - super().__init__() - - # self attention - self.self_attn = MSDeformAttn( - embed_dim=d_model, - num_levels=n_levels, - num_heads=n_heads, - num_points=n_points, - batch_first=True, - ) - self.dropout1 = nn.Dropout(dropout) - self.norm1 = nn.LayerNorm(d_model) - - # ffn - 
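# position-wise feed-forward: expand d_model to d_ffn, apply activation and - # dropout, then project back to d_model; forward_ffn below adds the residual - # connection and LayerNorm -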
self.linear1 = nn.Linear(d_model, d_ffn) - self.activation = _get_activation_fn(activation, d_model=d_ffn) - self.dropout2 = nn.Dropout(dropout) - self.linear2 = nn.Linear(d_ffn, d_model) - self.dropout3 = nn.Dropout(dropout) - self.norm2 = nn.LayerNorm(d_model) - - @staticmethod - def with_pos_embed(tensor, pos): - return tensor if pos is None else tensor + pos - - def forward_ffn(self, src): - src2 = self.linear2(self.dropout2(self.activation(self.linear1(src)))) - src = src + self.dropout3(src2) - src = self.norm2(src) - return src - - def forward( - self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None - ): - # self attention - # import ipdb; ipdb.set_trace() - src2 = self.self_attn( - query=self.with_pos_embed(src, pos), - reference_points=reference_points, - value=src, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - key_padding_mask=key_padding_mask, - ) - src = src + self.dropout1(src2) - src = self.norm1(src) - - # ffn - src = self.forward_ffn(src) - - return src - - -class DeformableTransformerDecoderLayer(nn.Module): - def __init__( - self, - d_model=256, - d_ffn=1024, - dropout=0.1, - activation="relu", - n_levels=4, - n_heads=8, - n_points=4, - use_text_feat_guide=False, - use_text_cross_attention=False, - ): - super().__init__() - - # cross attention - self.cross_attn = MSDeformAttn( - embed_dim=d_model, - num_levels=n_levels, - num_heads=n_heads, - num_points=n_points, - batch_first=True, - ) - self.dropout1 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() - self.norm1 = nn.LayerNorm(d_model) - - # cross attention text - if use_text_cross_attention: - self.ca_text = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) - self.catext_dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity() - self.catext_norm = nn.LayerNorm(d_model) - - # self attention - self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout) - self.dropout2 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() - self.norm2 = nn.LayerNorm(d_model) - - # ffn - self.linear1 = nn.Linear(d_model, d_ffn) - self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1) - self.dropout3 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() - self.linear2 = nn.Linear(d_ffn, d_model) - self.dropout4 = nn.Dropout(dropout) if dropout > 0 else nn.Identity() - self.norm3 = nn.LayerNorm(d_model) - - self.key_aware_proj = None - self.use_text_feat_guide = use_text_feat_guide - assert not use_text_feat_guide - self.use_text_cross_attention = use_text_cross_attention - - def rm_self_attn_modules(self): - self.self_attn = None - self.dropout2 = None - self.norm2 = None - - @staticmethod - def with_pos_embed(tensor, pos): - return tensor if pos is None else tensor + pos - - def forward_ffn(self, tgt): - with torch.cuda.amp.autocast(enabled=False): - tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout4(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward( - self, - # for tgt - tgt: Optional[Tensor], # nq, bs, d_model - tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos)) - tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos) - tgt_key_padding_mask: Optional[Tensor] = None, - tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4 - memory_text: Optional[Tensor] = None, # bs, num_token, d_model - text_attention_mask: Optional[Tensor] = None, # bs, num_token - # for memory - memory: Optional[Tensor] = None, # hw, bs, d_model - memory_key_padding_mask: Optional[Tensor] = None, - memory_level_start_index: Optional[Tensor] = None, # num_levels - memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2 - memory_pos: Optional[Tensor] = None, # pos for memory - # sa - self_attn_mask: Optional[Tensor] = None, # mask used for self-attention - cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention - ): - """ - Input: - - tgt/tgt_query_pos: nq, bs, d_model - - - """ - assert cross_attn_mask is None - - # self attention - if self.self_attn is not None: - # import ipdb; ipdb.set_trace() - q = k = self.with_pos_embed(tgt, tgt_query_pos) - tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - - if self.use_text_cross_attention: - tgt2 = self.ca_text( - self.with_pos_embed(tgt, tgt_query_pos), - memory_text.transpose(0, 1), - memory_text.transpose(0, 1), - key_padding_mask=text_attention_mask, - )[0] - tgt = tgt + self.catext_dropout(tgt2) - tgt = self.catext_norm(tgt) - - tgt2 = self.cross_attn( - query=self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1), - reference_points=tgt_reference_points.transpose(0, 1).contiguous(), - value=memory.transpose(0, 1), - spatial_shapes=memory_spatial_shapes, - level_start_index=memory_level_start_index, - key_padding_mask=memory_key_padding_mask, - ).transpose(0, 1) - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - - # ffn - tgt = self.forward_ffn(tgt) - - return tgt - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - num_queries=args.num_queries, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=True, - query_dim=args.query_dim, - activation=args.transformer_activation, - num_patterns=args.num_patterns, - num_feature_levels=args.num_feature_levels, - enc_n_points=args.enc_n_points, - dec_n_points=args.dec_n_points, - learnable_tgt_init=True, - # two stage - two_stage_type=args.two_stage_type, # ['no', 'standard', 'early'] - embed_init_tgt=args.embed_init_tgt, - use_text_enhancer=args.use_text_enhancer, - use_fusion_layer=args.use_fusion_layer, - use_checkpoint=args.use_checkpoint, - use_transformer_ckpt=args.use_transformer_ckpt, - use_text_cross_attention=args.use_text_cross_attention, - text_dropout=args.text_dropout, - fusion_dropout=args.fusion_dropout, - fusion_droppath=args.fusion_droppath, - ) diff --git a/spaces/merve/anonymization/public/third_party/npyjs.js b/spaces/merve/anonymization/public/third_party/npyjs.js deleted file mode 100644 index bd741887cd85f0a495015968a3793f9d1d944efe..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/public/third_party/npyjs.js +++ /dev/null @@ -1,108 +0,0 @@ -// Apache-2.0 https://github.com/1wheel/npyjs - -const dtypes = { - ' '\x20').join(''); - - const hl = (header + spacepad).length; - - return Buffer.concat([ - Buffer.from('\x93NUMPY\x01\x00', 'latin1'), - // convert to little-endian - Buffer.from(new Uint8Array([hl % 256, hl/256 | 0])), - Buffer.from(header + spacepad, 'latin1'), - 
Buffer.from(typedArray.buffer) - ]); -} - -export default {parse, format}; \ No newline at end of file diff --git a/spaces/merve/data-leak/public/dataset-worldviews/script.js b/spaces/merve/data-leak/public/dataset-worldviews/script.js deleted file mode 100644 index 3ebba088d65f389af1b446a9ea90fcde674d5fdf..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/public/dataset-worldviews/script.js +++ /dev/null @@ -1,588 +0,0 @@ - -console.clear(); - -var ttSel = d3.select("body").selectAppend("div.tooltip.tooltip-hidden"); -// For result tables -const columns = ["object", "n", "n correct", "accuracy"]; -const rowHeight = 50; -const rowWidth = 100; -const buffer = 2; - -const classifierBlobWidth = 50; -const classifierBlobHeight = 460; - -function drawShapesWithData(classifier) { - var divHeight = classifier.class == "show-shapes" ? 250 : 490; - - var c = d3.conventions({ - sel: d3.select("." + classifier.class).html(""), - width: 1300, - height: divHeight, - layers: "ds", - }); - - function runClassifier() { - classifier.isClassified = true; - var duration = 3000; - classifierSel.classed("is-classified", true); - graphResultsGroup.classed("is-classified", true); - - drawResults(); - buttonSel.text("Reset"); - - var minX = d3.min(shapeParams, (d) => d.endX - 50); - var timer = d3.timer((ms) => { - if (!classifier.isClassified) { - timer.stop(); - shapeSel.classed("is-classified", false); - return; - } - - var t = d3.easeCubicInOut(ms / duration); - t = d3.clamp(0, t, 1); - - shapeParams.forEach((d, i) => { - d.x = d.startX + (d.endX - d.startX) * t; - d.y = d.startY + (d.endY - d.startY) * t; - d.isClassified = d.x > minX; - }); - - shapeSel - .translate((d) => [d.x, d.y]) - .classed("is-classified", (d) => d.isClassified); - - if (t == 1) { - timer.stop(); - } - }); - } - - function resetClassifier() { - shapeSel.translate((d) => [d.startX, d.startY]); - shapeSel.classed("is-classified", false); - classifier.isClassified = false; - shapeSel - .transition("position") - .duration(0) - .translate((d) => [d.startX, d.startY]); - classifierSel.classed("is-classified", false); - graphResultsGroup.classed("is-classified", false); - if (classifier.class != "show-shapes") { - classifierBlobSel.attr("opacity", 100); - } - - drawResults(); - buttonSel.text("Run Classifier"); - } - - // Add run/reset button - var buttonSel = d3 - .select("." 
+ classifier.class + "-button") - .html("") - .append("button#run") - .at({ - type: "button", - class: "classifier-button", - }) - .text("Run Classifier") - .on("click", () => { - // if already classified, reset - if (classifier.isClassified) { - // Resetting - resetClassifier(); - } else { - runClassifier(); - } - }); - - // Backgrounds for different classifications - var classifierSel = c.svg - .append("g") - .at({ - class: "classifier", - }) - .translate([465, 20]); - - classifierSel - .append("path.classifier-bg-shaded") - .at({ - d: classifierBgPathTop, - // fill: "#ccc", - // stroke: "#000", - }) - .translate([-50, 0]); - - classifierSel - .append("text.classifier-bg-text") - .at({ - fill: "#000", - textAnchor: "middle", - dominantBaseline: "central", - class: "monospace", - }) - .text("shaded") - .translate([160, 15]); - - classifierSel - .append("path.classifier-bg-unshaded") - .at({ - d: classifierBgPathBottom, - }) - .translate([-50, 160]); - - classifierSel - .append("text.classifier-bg-text") - .at({ - fill: "#000", - textAnchor: "middle", - dominantBaseline: "central", - class: "monospace", - }) - .text("unshaded") - .translate([160, 175]); - - // Add the shapes themselves - var shapeSel = c.svg - .appendMany("path.shape", shapeParams) - .at({ - d: (d) => d.path, - class: (d) => "gt-" + d.gt + " " + d.correctness, - }) - .translate(function (d) { - if (classifier.class == "show-shapes") { - return [d.initialX + 35, d.initialY-20]; - } else { - return [d.startX, d.startY]; - } - }) - .call(d3.attachTooltip) - .on("mouseover", (d) => { - ttSel.html(""); - if (classifier.usingLabel != "none") { - ttSel - .append("div") - .html( - `labeled: ${toPropertyString( - d[classifier.usingLabel], - classifier.isRounding - ).slice(0, -1)}` - ); - } - var gtSel = ttSel - .append("div") - .html( - `ground truth: ${d.gt}` - ); - if (classifier.isClassified) { - ttSel - .append("div.labeled-row") - .html( - `classified as: ${d.label}` - ); - - ttSel - .append("div.correct-row") - .classed("is-correct-tooltip", d.correctness == "correct") - .html(`
      ${d.correctness}ly classified `); - } - ttSel.classed("tt-text", true); - }); - - // If we're just showing shapes, ignore everything else - if (classifier.class == "show-shapes") return; - - // Add "classifier" line - var classifierBlobSel = c.svg - .append("g") - .at({ - class: "classifier-blob", - strokeWidth: 0, - }) - .translate([378, 20]); - - classifierBlobSel - .append("line.classifier-blob") - .at({ - class: "line", - x1: 27, - x2: 27, - y1: 0, - y2: 464, - stroke: "#000", - strokeWidth: 1, - }) - .style("stroke-dasharray", "5, 5"); - - classifierBlobSel - .append("text.classifier-blob-text") - .at({ - class: "classifier-blob-text monospace", - textAnchor: "middle", - dominantBaseline: "central", - }) - .text("is_shaded classifier") - .attr("transform", "translate(30,480) rotate(0)"); - - if (classifier.class == "show-shapes") { - classifierBlobSel.classed("is-classified", true); - } - - // Draw the results table with accuracies - // This will be hidden before classifier is run. - var graphResultsGroup = c.svg - .append("g") - .attr("class", "results") - .translate([-20, 19]); - - function drawResults() { - // Write text summary - summarySel = d3 - .select("." + classifier.class + "-summary") - .html(summaries[classifier.class]) - .translate([0, 20]); - summarySel.classed("summary-text", true); - summarySel.classed("is-classified", classifier.isClassified); - - if (!classifier.isClassified) { - c.layers[0].html(""); - classifier.wasClassified = false; - return; - } - - // Access results, which are calculated in shapes.js. - // If there are none, draw nothing. - results = allResults[classifier.class]; - if (!results) return; - - // Figure out which shapes should be highlighted on mouseover - // This depends on whether we're "rounding" edge case examples. - function isMatch(rowName, labelName, isRounding) { - // Not filtering at all - if (rowName == "shape") { - return true; - } - if (isRounding == true) { - // No "other" category - return labelName.includes(toOriginalString(rowName)) - ? true - : false; - } else { - // There is an "other" category, prefixed by "rt_" - if (labelName == toOriginalString(rowName)) { - return true; - } else if ( - labelName.includes("rt_") && - rowName == "other shapes" - ) { - return true; - } - return false; - } - } - - // Color the last row of each table - function getColor(d, i) { - if (i != 3) { - // not last index - return "#e6e6e6"; - } else { - var scaleRowValue = d3 - .scaleLinear() - .domain([0.3, 1.0]) - .range([0, 1]); - return d3.interpolateRdYlGn(scaleRowValue(d)); - } - } - - // Adjust text color for visibility - function getTextColor(d, i) { - if (i != 3) { - // not last index - return "#000000"; - } else { - var bgColor = getColor(d, i); - if (d < 0.3) { - // Alternative: use a brighter color? - // return d3.rgb(bgColor).brighter(-2); - return "#FFCCD8"; - } else { - // Alternative: use a darker color? 
- // return d3.rgb(bgColor).darker(2); - return "#000000"; - } - } - } - - // Draw results table - var tableSel = c.layers[0] - .html("") - .raise() - .st({ width: 400 }) - .append("div") - .translate([0, 10]) - .append("table.results-table.monospace") - .st({ width: 400 }); - - var header = tableSel - .append("thead") - .append("tr") - .appendMany("th", columns) - .text((d) => d); - - var rowSel = tableSel - .appendMany("tr", results) - .at({ - class: "row monospace", - }) - .on("mouseover", (row) => { - if (classifier.class == "default-classifier") { - return; - } - rowSel.classed("active", (d) => d == row); - shapeSel.classed("shape-row-unhighlighted", function (d) { - return !isMatch( - row.object, - d[classifier.usingLabel], - (isRounding = classifier.isRounding) - ); - }); - }) - .on("mouseout", (row) => { - rowSel.classed("active", function (d) { - if (d == row) { - return false; - } - }); - if (classifier.isClassified) { - shapeSel.classed("shape-row-unhighlighted", 0); - } - }); - - rowSel - .appendMany("td", (result) => - columns.map((column) => result[column]) - ) - .text((d) => d) - .st({ - backgroundColor: getColor, - color: getTextColor, - }); - - header.style("opacity", 0); - rowSel.style("opacity", 0); - - // If the classifier has already been run before, draw results right away. - // Otherwise, wait for other animation to run before drawing results. - var initialDelay = classifier.wasClassified ? 0 : 2000; - classifier.wasClassified = true; - - header - .transition() - .delay(initialDelay) - .duration(1000) - .style("opacity", 1); - rowSel - .transition() - .delay(function (d, i) { - return initialDelay + i * 200; - }) - .duration(1000) - .style("opacity", 1); - } - - // Draw the dropdowns for selecting different labels - function drawDropdown() { - if (!classifier.options) return; - - ["rounding", "category"].forEach(function (classifierType) { - if (!classifier.options[classifierType]) return; - var sel = d3 - .select("#" + classifier.class + "-select-" + classifierType) - .html(""); - sel.classed("dropdown", true); - sel.appendMany("option", classifier.options[classifierType]) - .at({ - value: function (d) { - return d.value; - }, - }) - .text((d) => d.label); - sel.on("change", function () { - if (classifierType == "rounding") { - classifier.isRounding = toBool(this.value); - } else { - classifier.usingLabel = this.value; - } - updateResults(); - drawResults(); - }); - }); - } - drawDropdown(); - updateResults(); - drawResults(); - - // For continuity, auto-run the second two classifiers - if ( - classifier.class == "second-classifier" || - classifier.class == "final-classifier" - ) { - runClassifier(); - } -} - -// Draw the "Labels Tell Stories" section -function drawConclusion() { - function drawNewspapers() { - d3.select(".conclusion-newspapers").html(function () { - var imgPath = - "img/newspapers_" + - document.getElementById("conclusion-select-category").value; - return ( - 'Newspapers with headlines about bias and fairness in shape data.' - ); - }); - } - - function drawInterface() { - d3.select(".conclusion-interface").html(function () { - var imgPath = - "img/confusing_" + - document.getElementById("conclusion-select-category").value; - return ( - '
      A shape that is difficult to classify with several checkboxes, none of which describe the shape. Next to the interface is a text box with a single question mark in it.
      ' - ); - }); - } - - function drawConclusionSummary() { - classifierSel = d3 - .select(".conclusion-summary") - .html(summaries["conclusion"]); - classifierSel.classed("summary-text is-classified", true); - } - - function drawDropdown() { - var sel = d3.select("#conclusion-select-category").html(""); - sel.classed("dropdown", true); - sel.appendMany("option", conclusionOptions.category) - .at({ - value: function (d) { - return d.value; - }, - }) - .text((d) => d.label); - // sel.attr('select', 'circles, triangles, and rectangles'); - sel.on("change", function (d) { - makeConclusionUpdates(); - }); - } - - function makeConclusionUpdates() { - updateResults(); - drawNewspapers(); - drawInterface(); - drawConclusionSummary(); - } - drawDropdown(); - makeConclusionUpdates(); -} - -// Handle the parameters everywhere classifiers are drawn -var classifiers = [ - { - // Just the initial display of shapes, not interactive - class: "show-shapes", - colorBy: (d) => d.correctness, - isClassified: false, - isRounding: false, - usingLabel: "none", - }, - { - class: "default-classifier", - colorBy: (d) => d.correctness, - isClassified: false, - isRounding: false, - usingLabel: "none", - }, - { - class: "second-classifier", - colorBy: (d) => d.correctness, - isClassified: false, - isRounding: true, - usingLabel: "shape_name", - options: { - rounding: [ - { label: "with their best guess", value: true }, - { label: 'as "other"', value: false }, - ], - }, - }, - { - class: "final-classifier", - colorBy: (d) => d.correctness, - isClassified: false, - isRounding: true, - usingLabel: "shape_name", - options: { - rounding: [ - { label: "with our best guess", value: true }, - { label: 'as "other"', value: false }, - ], - category: [ - { - label: "circles, triangles, or rectangles", - value: "shape_name", - }, - { label: "pointy shapes or round shapes", value: "pointiness" }, - { label: "small shapes or big shapes", value: "size" }, - { label: "just shapes", value: "none" }, - ], - }, - }, -]; - -// "Labels Tell Stories" dropdown options -var conclusionOptions = { - category: [ - { label: "circles, triangles, and rectangles", value: "shape_name" }, - { label: "pointy shapes and round shapes", value: "pointiness" }, - { label: "small shapes and big shapes", value: "size" }, - ], -}; - -classifiers.forEach(drawShapesWithData); -drawConclusion(); - -// These images are loaded invisibly so they appear seamlessly on dropdown change -const preloadImages = [ - "img/confusing_pointiness.png", - "img/confusing_pointiness.svg", - "img/confusing_shape_name.png", - "img/confusing_shape_name.svg", - "img/confusing_size.png", - "img/confusing_size.svg", - "img/interface_default.png", - "img/interface_default.svg", - "img/interface_shape_name_false.png", - "img/interface_shape_name_false.svg", - "img/interface_shape_name_true.png", - "img/interface_shape_name_true.svg", - "img/newspapers_pointiness.png", - "img/newspapers_pointiness.svg", - "img/newspapers_shape_name.png", - "img/newspapers_shape_name.svg", - "img/newspapers_size.png", - "img/newspapers_size.svg", -]; - -d3.select(".preload-dropdown-img") - .html("") - .appendMany("img", preloadImages) - .at({ src: (d) => d }); diff --git a/spaces/merve/measuring-fairness/source/uncertainty-calibration/util.js b/spaces/merve/measuring-fairness/source/uncertainty-calibration/util.js deleted file mode 100644 index a0ce5b12a2a642f1186cc4004e90b046a89611f8..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/uncertainty-calibration/util.js 
+++ /dev/null @@ -1,38 +0,0 @@ -window.initUtil = function(){ - function addAxisLabel(c, xText, yText, xOffset=40, yOffset=-40){ - c.svg.select('.x').append('g') - .translate([c.width/2, xOffset]) - .append('text.axis-label') - .text(xText) - .at({textAnchor: 'middle'}) - .st({fill: '#000', fontSize: 14, fontFamily: 'sans-serif'}) - - c.svg.select('.y') - .append('g') - .translate([yOffset, c.height/2]) - .append('text.axis-label') - .text(yText) - .at({textAnchor: 'middle', transform: 'rotate(-90)'}) - .st({fill: '#000', fontSize: 14, fontFamily: 'sans-serif'}) - } - - function ggPlotBg(c, isBlack=true){ - if (isBlack){ - c.svg.append('rect.bg-rect') - .at({width: c.width, height: c.height, fill: '#eee'}) - .lower() - } - - c.svg.selectAll('.tick').selectAll('line').remove() - c.svg.selectAll('.y .tick') - .append('path').at({d: 'M 0 0 H ' + c.width, stroke: '#fff', strokeWidth: 1}) - c.svg.selectAll('.y text').at({x: -3}) - c.svg.selectAll('.x .tick') - .append('path').at({d: 'M 0 0 V -' + c.height, stroke: '#fff', strokeWidth: 1}) - } - - - return {addAxisLabel, ggPlotBg} -} - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/source/anonymization/make-sel.js b/spaces/merve/uncertainty-calibration/source/anonymization/make-sel.js deleted file mode 100644 index 3b35b931008be7afe990694afdf232d05d5f4ee2..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/anonymization/make-sel.js +++ /dev/null @@ -1,78 +0,0 @@ -window.makeSel = function(){ - function ttFmt(d){ - var ttSel = d3.select('.tooltip').html('') - - var ageStr = d.age + ' year old' - if (slides.curSlide.index == 4){ - ageStr = ageStr + ' born in the ' + ['spring', 'summer', 'fall', 'winter'][d.season] - } - ttSel.append('div').html(` - ${ageStr} from ${d.state} who - ${d.plagerized ? - 'plagiarized' : - 'never plagiarized'} - `) - - if (slides.curSlide.index < 6) return - - var isHeads = d.coinVals[estimates.active.index] < sliders.headsProb - ttSel.append('div').html(` - They flipped - ${isHeads ? 'heads' : 'tails'} - and said they had - ${d.plagerized || isHeads ? - 'plagiarized' : - 'never plagiarized'} - `) - .st({marginTop: 10}) - } - - var rectAt = {} - var rs = (axii.bw - 10)*2 - rectAt.ageState = {width: rs, height: rs, x: -rs/2, y: -rs/2} - var uniqueBox = c.svg.appendMany('rect.unique.init-hidden', students.byAgeState.filter(d => d.length == 1)) - .translate(d => d.pos) - .at(rectAt.ageState) - - var rs = axii.bw/4 + 5.5 - rectAt.ageStateSeason = {width: rs, height: rs, x: Math.round(-rs/2), y: 4} - var uniqueSeasonBox = c.svg.appendMany( - 'rect.unique.init-hidden', - students.byAgeStateSeason.filter(d => d.length == 1 && d[0].group.ageState.length > 1)) - .translate(d => d.pos) - .at(rectAt.ageStateSeason) - - // number of uniquely id'd students - // console.log(uniqueSeasonBox.size()) - - var studentGroup = c.svg.append('g') - .at({width: 500, height: 500}) - - var student = studentGroup.appendMany('g.student', students.all) - .call(d3.attachTooltip) - .on('mouseover', ttFmt) - .translate(d => d.isAdditionalStudent ? [0,0]: d.pos.grid) - .classed('inactive', d => d.isAdditionalStudent) - - var rs = 16 - var flipCircle = student.append('circle') - .at({transform: 'scale(.1)'}) - .at({r: 9, fill: '#fff'}) - .at({stroke: '#b0b' }) - - var circle = student.append('circle').at({ - r: 5, - fill: d => d.plagerized ? '#f0f' : '#ccc', - stroke: d => d.plagerized ? 
'#b0b' : '#aaa', - strokeWidth: 1, - }) - - - - addSwoop(c) - - return {student, studentGroup, circle, flipCircle, rectAt, uniqueBox, uniqueSeasonBox} -} - - -if (window.init) window.init() diff --git a/spaces/merve/uncertainty-calibration/source/uncertainty-calibration/init.js b/spaces/merve/uncertainty-calibration/source/uncertainty-calibration/init.js deleted file mode 100644 index d23a4fecea1bfa4fae6557043d8053dc3acc29ce..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/uncertainty-calibration/init.js +++ /dev/null @@ -1,36 +0,0 @@ -window.thresholds = [0, 0.2, 0.4, 0.6, 0.8, 1]; -window.emojis = ['☀️','🌧️']; -window.constant_score = 0.5; - -window.ttSel = d3.select('body').selectAppend('div.tooltip.tooltip-hidden') - - -window.init = function(){ - - var graphSel = d3.select('#graph') - var width = height = graphSel.node().offsetWidth - if (innerWidth <= 925){ - width = innerWidth - height = innerHeight*.65 - window.isMobile = true - } - fig_height = height/2 - fig_width = width - - - window.util = window.initUtil() - window.weatherGraph = window.drawWeatherGraph(graphSel, fig_height, fig_width); - window.calibrationCurve = window.drawCalibrationCurve(graphSel, fig_height, fig_width); - // window.calibrationSlider = window.drawCalibrationSlider(weatherGraph, calibrationCurve, fig_width/2) - // window.modelRemapper = window.drawModelRemapping(fig_width/2); - - - window.slides = window.drawSlides() - weatherGraph.renderThresholds() - -} - -window.init() - - - diff --git a/spaces/mikeee/ultimatumbee/tests/__init__.py b/spaces/mikeee/ultimatumbee/tests/__init__.py deleted file mode 100644 index d7bd10ea09e7a9aaf89296de7107fd47b4f4787b..0000000000000000000000000000000000000000 --- a/spaces/mikeee/ultimatumbee/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Init tests.""" diff --git a/spaces/mishtert/tracer/dtxutils.py b/spaces/mishtert/tracer/dtxutils.py deleted file mode 100644 index 99a3ac4833a465602517fb6601d5272c9c206465..0000000000000000000000000000000000000000 --- a/spaces/mishtert/tracer/dtxutils.py +++ /dev/null @@ -1,343 +0,0 @@ -from meshutils import nct_to_mesh_term, mesh_term_to_id, df_mesh, df_mesh_ct -from cid import CaseInsensitiveDict -from dictutils import * -import re -import streamlit as st - - -# mesh list extract -def meshtrm_lst_xtract(nct_value): - try: - mesh_term = nct_to_mesh_term[nct_value] - mesh_term_list = list(mesh_term) - return mesh_term_list - except: - pass - - -@st.cache(suppress_st_warning=True, allow_output_mutation=True) -# type extract fun -def type_extract(mesh_term_list): - mesh_term_list = [mesh_term_list] if isinstance(mesh_term_list, str) else mesh_term_list - # print('mesh_term_list: ',mesh_term_list) - - # l2_map_lst=[] - uid_lst = [] - if mesh_term_list is not None: - for val in mesh_term_list: - # print('value inside uid forloop:',val) - try: - # print('Inside get uid') - uid = mesh_term_to_id[val] - uid_lst.append(uid) - # print(uid_lst) - if uid_lst is None: - uid_lst = [] - except: - pass - # print('error in get uid list') - - # get mesh num - mesh_num_xtract_lst = [] - - for val in uid_lst: - try: - # print('Inside get mesh num') - mesh_num_xtract = df_mesh.loc[df_mesh['ui'] == val, 'mesh_number'].iloc[0] - mesh_num_xtract_lst.append(mesh_num_xtract) - # print(mesh_num_xtract_lst) - if ',' in mesh_num_xtract_lst[0]: - mesh_num_xtract_lst = mesh_num_xtract_lst[0].split(", ") - # print('mesh_num_xtract_lst after spltting',mesh_num_xtract_lst) - except: - pass - # print('error in get mesh num') - - 
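# the first three characters of a MeSH tree number identify its branch - # (a hypothetical 'C04.557.337' falls under prefix 'C04'); df_mesh maps that - # prefix back to the level-2 category name used below -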
# mesh number extract l2 - l2_map_lst = [] - for val in mesh_num_xtract_lst: - # print('Inside l2map for loop',val) - search_value = val[:3] - # print('printing search value:',search_value) - try: - l2_map = df_mesh.loc[df_mesh['mesh_number'] == search_value, 'name'].iloc[0] - # print(l2_map) - l2_map_lst.append(l2_map) - # print(l2_map_lst) - if l2_map_lst is None: - l2_map_lst = [] - except: - pass - - l2_map_lst = list(set(l2_map_lst)) - # print('finaloutput',l2_map_lst) - return l2_map_lst - - -def split_values(col_val): - # """split words seperated by special characters""" - # print(col_val) - if col_val != '': - char_list = ['|', ',', '/', '.', ';', './', ',/', '/ ', ' /'] - # res = ' '.join([ele for ele in char_list if(ele in col_val)]) - res = [ele for ele in char_list if (ele in col_val)] - # print('printing string of found char',res) - colstring = str(col_val) - f_res = [] - try: - while len(res) > 0: - res = res[-1] - f_res = colstring.split(''.join(res)) - # print(f_res) - # return f_res - f_res = [x for x in f_res if x is not None] - return ', '.join(f_res) - except: - pass - else: - return col_val - - -def map_entry_terms(myText): - obj = CaseInsensitiveDict(entry_dict) - pattern = re.compile(r'(? - -const OPTIONS_SETS = [ - 'nlu_direct_response_filter', - 'deepleo', - 'disable_emoji_spoken_text', - 'responsible_ai_policy_235', - 'enablemm', - 'iycapbing', - 'iyxapbing', - 'objopinion', - 'rweasgv2', - 'dagslnv1', - 'dv3sugg', - 'autosave', - 'iyoloxap', - 'iyoloneutral', - 'clgalileo', - 'gencontentv3', -] - -export class BingWebBot { - protected conversationContext?: ConversationInfo - protected cookie: string - protected ua: string - protected endpoint = '' - private lastText = '' - private asyncTasks: Array> = [] - - constructor(opts: { - cookie: string - ua: string - bingConversationStyle?: BingConversationStyle - conversationContext?: ConversationInfo - }) { - const { cookie, ua, conversationContext } = opts - this.cookie = cookie?.includes(';') ? 
cookie : `_EDGE_V=1; _U=${cookie}` - this.ua = ua - this.conversationContext = conversationContext - } - - static buildChatRequest(conversation: ConversationInfo) { - const optionsSets = OPTIONS_SETS - if (conversation.conversationStyle === BingConversationStyle.Precise) { - optionsSets.push('h3precise') - } else if (conversation.conversationStyle === BingConversationStyle.Creative) { - optionsSets.push('h3imaginative') - } - return { - arguments: [ - { - source: 'cib', - optionsSets, - allowedMessageTypes: [ - 'Chat', - 'InternalSearchQuery', - 'Disengaged', - 'InternalLoaderMessage', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - ], - sliceIds: [ - 'winmuid1tf', - 'anssupfor_c', - 'imgchatgptv2', - 'tts2cf', - 'contansperf', - 'mlchatpc8500w', - 'mlchatpc2', - 'ctrlworkpay', - 'winshortmsgtf', - 'cibctrl', - 'sydtransctrl', - 'sydconfigoptc', - '0705trt4', - '517opinion', - '628ajcopus0', - '330uaugs0', - '529rwea', - '0626snptrcs0', - '424dagslnv1', - ], - isStartOfSession: conversation.invocationId === 0, - message: { - author: 'user', - inputMethod: 'Keyboard', - text: conversation.prompt, - imageUrl: conversation.imageUrl, - messageType: 'Chat', - }, - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - participant: { id: conversation.clientId }, - }, - ], - invocationId: conversation.invocationId.toString(), - target: 'chat', - type: InvocationEventType.StreamInvocation, - } - } - - async createConversation(): Promise { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - - let resp: ConversationResponse | undefined - try { - const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' }) - if (response.status === 404) { - throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR) - } - resp = await response.json() as ConversationResponse - } catch (err) { - console.error('create conversation error', err) - } - - if (!resp?.result) { - throw new ChatError('Invalid response', ErrorCode.UNKOWN_ERROR) - } - - const { value, message } = resp.result || {} - if (value !== 'Success') { - const errorMsg = `${value}: ${message}` - if (value === 'UnauthorizedRequest') { - throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED) - } - if (value === 'Forbidden') { - throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR) - } - return resp - } - - private async createContext(conversationStyle: BingConversationStyle) { - if (!this.conversationContext) { - const conversation = await this.createConversation() - this.conversationContext = { - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - clientId: conversation.clientId, - invocationId: 0, - conversationStyle, - prompt: '', - } - } - return this.conversationContext - } - - async sendMessage(params: Params) { - try { - await this.createContext(params.options.bingConversationStyle) - Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl }) - return this.sydneyProxy(params) - } catch (error) { - params.onEvent({ - type: 'ERROR', - error: error instanceof ChatError ? 
error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR), - }) - } - } - - private async sydneyProxy(params: Params) { - const abortController = new AbortController() - const response = await fetch(this.endpoint + '/api/sydney', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal: abortController.signal, - body: JSON.stringify(this.conversationContext!) - }) - if (response.status !== 200) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Unknown error', - ErrorCode.UNKOWN_ERROR, - ), - }) - } - params.signal?.addEventListener('abort', () => { - abortController.abort() - }) - - const textDecoder = createChunkDecoder() - for await (const chunk of streamAsyncIterable(response.body!)) { - this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk))) - } - } - - async sendWs() { - const wsConfig: ConstructorParameters[1] = { - packMessage: websocketUtils.packMessage, - unpackMessage: websocketUtils.unpackMessage, - createWebSocket: (url) => new WebSocket(url, { - headers: { - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'User-Agent': this.ua, - pragma: 'no-cache', - cookie: this.cookie, - } - }) - } - const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig) - - wsp.open().then(() => { - wsp.sendPacked({ protocol: 'json', version: 1 }) - wsp.sendPacked({ type: 6 }) - wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!)) - }) - - return wsp - } - - private async useWs(params: Params) { - const wsp = await this.sendWs() - const watchDog = new WatchDog() - wsp.onUnpackedMessage.addListener((events) => { - watchDog.watch(() => { - wsp.sendPacked({ type: 6 }) - }) - this.parseEvents(params, events) - }) - - wsp.onClose.addListener(() => { - watchDog.reset() - params.onEvent({ type: 'DONE' }) - wsp.removeAllListeners() - }) - - params.signal?.addEventListener('abort', () => { - wsp.removeAllListeners() - wsp.close() - }) - } - - private async createImage(prompt: string, id: string) { - try { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - const query = new URLSearchParams({ - prompt, - id - }) - const response = await fetch(this.endpoint + '/api/image?' 
+ query.toString(), - { - method: 'POST', - headers, - mode: 'cors', - credentials: 'include' - }) - .then(res => res.text()) - if (response) { - this.lastText += '\n' + response - } - } catch (err) { - console.error('Create Image Error', err) - } - } - - private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) { - const imageInfo: ImageInfo = {} - let imageBase64: string | undefined = undefined - const knowledgeRequest = { - imageInfo, - knowledgeRequest: { - invokedSkills: [ - 'ImageById' - ], - subscriptionId: 'Bing.Chat.Multimodal', - invokedSkillsRequestData: { - enableFaceBlur: true - }, - convoData: { - convoid: this.conversationContext?.conversationId, - convotone: conversationStyle, - } - }, - } - - if (imageUrl.startsWith('data:image/')) { - imageBase64 = imageUrl.replace('data:image/', ''); - const partIndex = imageBase64.indexOf(',') - if (partIndex) { - imageBase64 = imageBase64.substring(partIndex + 1) - } - } else { - imageInfo.url = imageUrl - } - return { knowledgeRequest, imageBase64 } - } - - async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise { - if (!imageUrl) { - return - } - await this.createContext(conversationStyle) - const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle) - - const response = await fetch(this.endpoint + '/api/kblob', - { - headers: { - 'Content-Type': 'application/json', - }, - method: 'POST', - mode: 'cors', - credentials: 'include', - body: JSON.stringify(payload), - }) - .then(res => res.json()) - .catch(e => { - console.log('Error', e) - }) - return response - } - - private async generateContent(message: ChatResponseMessage) { - if (message.contentType === 'IMAGE') { - this.asyncTasks.push(this.createImage(message.text, message.messageId)) - } - } - - private async parseEvents(params: Params, events: any) { - const conversation = this.conversationContext! - - events?.forEach(async (event: ChatUpdateCompleteResponse) => { - debug('bing event', event) - if (event.type === 3) { - await Promise.all(this.asyncTasks) - this.asyncTasks = [] - params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } }) - params.onEvent({ type: 'DONE' }) - conversation.invocationId = parseInt(event.invocationId, 10) + 1 - } else if (event.type === 1) { - const messages = event.arguments[0].messages - if (messages) { - const text = convertMessageToMarkdown(messages[0]) - this.lastText = text - params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } }) - } - } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined - if (!messages) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - event.item.result.error || 'Unknown error', - event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT - : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? 
ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA) - : ErrorCode.UNKOWN_ERROR - ), - }) - return - } - const limited = messages.some((message) => - message.contentOrigin === 'TurnLimiter' - || message.messageType === 'Disengaged' - ) - if (limited) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Sorry, you have reached chat limit in this conversation.', - ErrorCode.CONVERSATION_LIMIT, - ), - }) - return - } - - const lastMessage = event.item.messages.at(-1) as ChatResponseMessage - const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE') - if (specialMessage) { - this.generateContent(specialMessage) - } - - if (lastMessage) { - const text = convertMessageToMarkdown(lastMessage) - this.lastText = text - params.onEvent({ - type: 'UPDATE_ANSWER', - data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions }, - }) - } - } - }) - } - - resetConversation() { - this.conversationContext = undefined - } -} diff --git a/spaces/miyaaa666/bingo/src/lib/hooks/chat-history.ts b/spaces/miyaaa666/bingo/src/lib/hooks/chat-history.ts deleted file mode 100644 index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000 --- a/spaces/miyaaa666/bingo/src/lib/hooks/chat-history.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { zip } from 'lodash-es' -import { ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { Storage } from '../storage' - -/** - * conversations:$botId => Conversation[] - * conversation:$botId:$cid:messages => ChatMessageModel[] - */ - -interface Conversation { - id: string - createdAt: number -} - -type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] } - -async function loadHistoryConversations(botId: BotId): Promise { - const key = `conversations:${botId}` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -async function deleteHistoryConversation(botId: BotId, cid: string) { - const conversations = await loadHistoryConversations(botId) - const newConversations = conversations.filter((c) => c.id !== cid) - await Storage.set({ [`conversations:${botId}`]: newConversations }) -} - -async function loadConversationMessages(botId: BotId, cid: string): Promise { - const key = `conversation:${botId}:${cid}:messages` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) { - const conversations = await loadHistoryConversations(botId) - if (!conversations.some((c) => c.id === cid)) { - conversations.unshift({ id: cid, createdAt: Date.now() }) - await Storage.set({ [`conversations:${botId}`]: conversations }) - } - const key = `conversation:${botId}:${cid}:messages` - await Storage.set({ [key]: messages }) -} - -export async function loadHistoryMessages(botId: BotId): Promise { - const conversations = await loadHistoryConversations(botId) - const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id))) - return zip(conversations, messagesList).map(([c, messages]) => ({ - id: c!.id, - createdAt: c!.createdAt, - messages: messages!, - })) -} - -export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) { - const messages = await loadConversationMessages(botId, conversationId) - const newMessages = messages.filter((m) => m.id !== messageId) - 
await setConversationMessages(botId, conversationId, newMessages) - if (!newMessages.length) { - await deleteHistoryConversation(botId, conversationId) - } -} diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/reader.py b/spaces/mmlab-ntu/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/reader.py deleted file mode 100644 index 3077ec828988957c95fa85e8399ed382273a0257..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/datasets/scannet_preprocess/scannet_pair/reader.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse -import os, sys - -from SensorData import SensorData - - -def reader(filename, - output_path, - frame_skip, - export_color_images=False, - export_depth_images=False, - export_poses=False, - export_intrinsics=False): - if not os.path.exists(output_path): - os.makedirs(output_path) - - # load the data - print('loading %s...' % filename) - sd = SensorData(filename) - if export_depth_images: - sd.export_depth_images(os.path.join(output_path, 'depth'), frame_skip=frame_skip) - if export_color_images: - sd.export_color_images(os.path.join(output_path, 'color'), frame_skip=frame_skip) - if export_poses: - sd.export_poses(os.path.join(output_path, 'pose'), frame_skip=frame_skip) - if export_intrinsics: - sd.export_intrinsics(os.path.join(output_path, 'intrinsic')) diff --git a/spaces/mohit-217/invoice_by_mohit/app.py b/spaces/mohit-217/invoice_by_mohit/app.py deleted file mode 100644 index 8f41fbc354819e18007f9e567da68c1e8e98cef3..0000000000000000000000000000000000000000 --- a/spaces/mohit-217/invoice_by_mohit/app.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -os.system('pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu') -import gradio as gr -import numpy as np -from transformers import AutoModelForTokenClassification -from datasets.features import ClassLabel -from transformers import AutoProcessor -from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D -import torch -from datasets import load_metric -from transformers import LayoutLMv3ForTokenClassification -from transformers.data.data_collator import default_data_collator - - -from transformers import AutoModelForTokenClassification -from datasets import load_dataset -from PIL import Image, ImageDraw, ImageFont - - -processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True) -model = AutoModelForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv3-finetuned-invoice") - - - -# load image example -dataset = load_dataset("darentang/generated", split="test") -# Image.open(dataset[2]["image_path"]).convert("RGB").save("example1.png") -# Image.open(dataset[1]["image_path"]).convert("RGB").save("example2.png") -# Image.open(dataset[0]["image_path"]).convert("RGB").save("example3.png") -# define id2label, label2color -labels = dataset.features['ner_tags'].feature.names -id2label = {v: k for v, k in enumerate(labels)} -label2color = { - "B-ABN": 'blue', - "B-BILLER": 'blue', - "B-BILLER_ADDRESS": 'green', - "B-BILLER_POST_CODE": 'orange', - "B-DUE_DATE": "blue", - "B-GST": 'green', - "B-INVOICE_DATE": 'violet', - "B-INVOICE_NUMBER": 'orange', - "B-SUBTOTAL": 'green', - "B-TOTAL": 'blue', - "I-BILLER_ADDRESS": 'blue', - "O": 'orange' - } - -def unnormalize_box(bbox, width, height): - return [ - width * (bbox[0] / 1000), - height * (bbox[1] / 1000), - width * (bbox[2] / 1000), - height * (bbox[3] / 1000), - ] - - -def iob_to_label(label): - return label - - - -def 
process_image(image): - - print(type(image)) - width, height = image.size - - # encode - encoding = processor(image, truncation=True, return_offsets_mapping=True, return_tensors="pt") - offset_mapping = encoding.pop('offset_mapping') - - # forward pass - outputs = model(**encoding) - - # get predictions - predictions = outputs.logits.argmax(-1).squeeze().tolist() - token_boxes = encoding.bbox.squeeze().tolist() - - # only keep non-subword predictions - is_subword = np.array(offset_mapping.squeeze().tolist())[:,0] != 0 - true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]] - true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]] - - # draw predictions over the image - draw = ImageDraw.Draw(image) - font = ImageFont.load_default() - for prediction, box in zip(true_predictions, true_boxes): - predicted_label = iob_to_label(prediction) - draw.rectangle(box, outline=label2color[predicted_label]) - draw.text((box[0]+10, box[1]-10), text=predicted_label, fill=label2color[predicted_label], font=font) - - return image - - -title = "Invoice Information extraction by Mohit Kumar" -description = "Invoice Information Extraction for Biller Name, Biller Address, Biller post_code, Due_date, GST, Invoice_date, Invoice_number, Subtotal and Total. To use it, simply upload an image or use the example image below. Results will show up in a few seconds." - - -iface = gr.Interface(fn=process_image, - inputs=gr.inputs.Image(type="pil"), - outputs=gr.outputs.Image(type="pil", label="annotated image"), - title=title, - description=description, - analytics_enabled = True, enable_queue=True) - -iface.launch(inline=False, share=False, debug=False) \ No newline at end of file diff --git a/spaces/molok3/alea31415-onimai-characters/README.md b/spaces/molok3/alea31415-onimai-characters/README.md deleted file mode 100644 index 5d9aca0d0f19a83f49c3f7d200375081c73b0eb4..0000000000000000000000000000000000000000 --- a/spaces/molok3/alea31415-onimai-characters/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Alea31415 Onimai Characters -emoji: 📊 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/monra/freegpt-webui-chimera/client/js/api-key.js b/spaces/monra/freegpt-webui-chimera/client/js/api-key.js deleted file mode 100644 index 9fb9108197406ebe2788baf042686f6a5573ff2c..0000000000000000000000000000000000000000 --- a/spaces/monra/freegpt-webui-chimera/client/js/api-key.js +++ /dev/null @@ -1,33 +0,0 @@ -const showApiKeyButton = document.getElementById("show-api-key-button"); -const apiKeyInput = document.getElementById("API-key"); -const apiKeyOkButton = document.getElementById("api-key-ok-button"); - -showApiKeyButton.addEventListener("click", () => { - showApiKeyButton.classList.add("hidden"); - - apiKeyInput.classList.remove("hidden"); - apiKeyOkButton.classList.remove("hidden"); -}); - -apiKeyOkButton.addEventListener("click", () => { - localStorage.setItem("API-key", apiKeyInput.value); - - apiKeyInput.classList.add("hidden"); - apiKeyOkButton.classList.add("hidden"); - - showApiKeyButton.classList.remove("hidden"); -}); - -window.addEventListener("DOMContentLoaded", () => { - const apiKey = localStorage.getItem("API-key"); - if (apiKey) { - apiKeyInput.value = apiKey; - } -}); - -(function () { - function get_api_key_from_input() { - return 
apiKeyInput.value; - } - window.get_api_key_from_input = get_api_key_from_input; -})(); diff --git a/spaces/mrm8488/PromptSource/promptsource/seqio_tasks/utils.py b/spaces/mrm8488/PromptSource/promptsource/seqio_tasks/utils.py deleted file mode 100644 index 1b4df95aa161ac06051c2397402342f3922342d2..0000000000000000000000000000000000000000 --- a/spaces/mrm8488/PromptSource/promptsource/seqio_tasks/utils.py +++ /dev/null @@ -1,77 +0,0 @@ -import re - -import datasets -import tensorflow as tf - -import promptsource.utils - - -def feature_to_spec(feature, length=False): - if isinstance(feature, datasets.ClassLabel): - return tf.TensorSpec(shape=() if not length else (None if length == -1 else length,), dtype=tf.int64) - elif isinstance(feature, datasets.Value): - return tf.TensorSpec( - shape=() if not length else (None if length == -1 else length,), dtype=getattr(tf.dtypes, feature.dtype) - ) - elif hasattr(feature, "dtype") and hasattr(feature, "shape"): - return tf.TensorSpec(shape=feature.shape, dtype=feature.dtype) - elif isinstance(feature, datasets.Sequence): - return feature_to_spec(feature.feature, length=feature.length) - elif isinstance(feature, list): - return [feature_to_spec(f, length=length) for f in feature] - elif isinstance(feature, dict): - return {k: feature_to_spec(v, length=length) for k, v in feature.items()} - else: - raise ValueError(f"Unparseable feature type {type(feature)}") - - -def hf_dataset_to_tf_dataset(dataset): - return tf.data.Dataset.from_generator( - dataset.__iter__, output_signature={k: feature_to_spec(v) for k, v in dataset.features.items()} - ) - - -def apply_template(dataset, template): - def map_fn(ex): - ex = promptsource.utils.removeHyphen(ex) - inputs_and_targets = template.apply(ex) - answer_choices = template.get_answer_choices_list(ex) - if len(inputs_and_targets) == 2: - inputs, targets = inputs_and_targets - if targets == "": - ex = {"inputs": inputs, "targets": ""} - else: - ex = {"inputs": inputs, "targets": targets} - # When template results in an empty example, template.apply returns [""] - # Also, if the template gets split wrong, len can be > 2 - # We will filter these out later - else: - ex = {"inputs": "", "targets": ""} - - if answer_choices: - ex["answer_choices"] = answer_choices - - return ex - - def filter_fn(ex): - return len(ex["inputs"]) > 0 and len(ex["targets"]) > 0 - - original_columns = dataset.column_names - dataset = dataset.map(map_fn).filter(filter_fn) - # map keeps original columns, remove them - return dataset.remove_columns(set(original_columns) - {"inputs", "targets", "answer_choices"}) - - -def get_dataset_splits(dataset_name, subset_name=None): - info = datasets.get_dataset_infos(dataset_name) - subset_name = subset_name or list(info.keys())[0] - return info[subset_name].splits - - -def task_clean(text): - # Clean the text according to allowed characters for a task name - return re.sub(r"[^\w\d\._]+", "_", text) - - -def get_task_name(dataset_name, subset_name, template_name): - return task_clean(dataset_name + (f"_{subset_name}_" if subset_name is not None else "_") + template_name) diff --git a/spaces/mshkdm/VToonify/vtoonify/model/bisenet/model.py b/spaces/mshkdm/VToonify/vtoonify/model/bisenet/model.py deleted file mode 100644 index e61c0eb20aaa63065cc17bbcfe27b245f1f0dbf5..0000000000000000000000000000000000000000 --- a/spaces/mshkdm/VToonify/vtoonify/model/bisenet/model.py +++ /dev/null @@ -1,283 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8 -*- - - -import torch -import torch.nn as nn -import 
torch.nn.functional as F -import torchvision - -from model.bisenet.resnet import Resnet18 -# from modules.bn import InPlaceABNSync as BatchNorm2d - - -class ConvBNReLU(nn.Module): - def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): - super(ConvBNReLU, self).__init__() - self.conv = nn.Conv2d(in_chan, - out_chan, - kernel_size = ks, - stride = stride, - padding = padding, - bias = False) - self.bn = nn.BatchNorm2d(out_chan) - self.init_weight() - - def forward(self, x): - x = self.conv(x) - x = F.relu(self.bn(x)) - return x - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - -class BiSeNetOutput(nn.Module): - def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs): - super(BiSeNetOutput, self).__init__() - self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) - self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False) - self.init_weight() - - def forward(self, x): - x = self.conv(x) - x = self.conv_out(x) - return x - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - - def get_params(self): - wd_params, nowd_params = [], [] - for name, module in self.named_modules(): - if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): - wd_params.append(module.weight) - if not module.bias is None: - nowd_params.append(module.bias) - elif isinstance(module, nn.BatchNorm2d): - nowd_params += list(module.parameters()) - return wd_params, nowd_params - - -class AttentionRefinementModule(nn.Module): - def __init__(self, in_chan, out_chan, *args, **kwargs): - super(AttentionRefinementModule, self).__init__() - self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) - self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False) - self.bn_atten = nn.BatchNorm2d(out_chan) - self.sigmoid_atten = nn.Sigmoid() - self.init_weight() - - def forward(self, x): - feat = self.conv(x) - atten = F.avg_pool2d(feat, feat.size()[2:]) - atten = self.conv_atten(atten) - atten = self.bn_atten(atten) - atten = self.sigmoid_atten(atten) - out = torch.mul(feat, atten) - return out - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - - -class ContextPath(nn.Module): - def __init__(self, *args, **kwargs): - super(ContextPath, self).__init__() - self.resnet = Resnet18() - self.arm16 = AttentionRefinementModule(256, 128) - self.arm32 = AttentionRefinementModule(512, 128) - self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) - self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) - self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0) - - self.init_weight() - - def forward(self, x): - H0, W0 = x.size()[2:] - feat8, feat16, feat32 = self.resnet(x) - H8, W8 = feat8.size()[2:] - H16, W16 = feat16.size()[2:] - H32, W32 = feat32.size()[2:] - - avg = F.avg_pool2d(feat32, feat32.size()[2:]) - avg = self.conv_avg(avg) - avg_up = F.interpolate(avg, (H32, W32), mode='nearest') - - feat32_arm = self.arm32(feat32) - feat32_sum = feat32_arm + avg_up - feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') - feat32_up = self.conv_head32(feat32_up) - - feat16_arm = self.arm16(feat16) - 
feat16_sum = feat16_arm + feat32_up - feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') - feat16_up = self.conv_head16(feat16_up) - - return feat8, feat16_up, feat32_up # x8, x8, x16 - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - - def get_params(self): - wd_params, nowd_params = [], [] - for name, module in self.named_modules(): - if isinstance(module, (nn.Linear, nn.Conv2d)): - wd_params.append(module.weight) - if not module.bias is None: - nowd_params.append(module.bias) - elif isinstance(module, nn.BatchNorm2d): - nowd_params += list(module.parameters()) - return wd_params, nowd_params - - -### This is not used, since I replace this with the resnet feature with the same size -class SpatialPath(nn.Module): - def __init__(self, *args, **kwargs): - super(SpatialPath, self).__init__() - self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3) - self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1) - self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1) - self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0) - self.init_weight() - - def forward(self, x): - feat = self.conv1(x) - feat = self.conv2(feat) - feat = self.conv3(feat) - feat = self.conv_out(feat) - return feat - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - - def get_params(self): - wd_params, nowd_params = [], [] - for name, module in self.named_modules(): - if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): - wd_params.append(module.weight) - if not module.bias is None: - nowd_params.append(module.bias) - elif isinstance(module, nn.BatchNorm2d): - nowd_params += list(module.parameters()) - return wd_params, nowd_params - - -class FeatureFusionModule(nn.Module): - def __init__(self, in_chan, out_chan, *args, **kwargs): - super(FeatureFusionModule, self).__init__() - self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) - self.conv1 = nn.Conv2d(out_chan, - out_chan//4, - kernel_size = 1, - stride = 1, - padding = 0, - bias = False) - self.conv2 = nn.Conv2d(out_chan//4, - out_chan, - kernel_size = 1, - stride = 1, - padding = 0, - bias = False) - self.relu = nn.ReLU(inplace=True) - self.sigmoid = nn.Sigmoid() - self.init_weight() - - def forward(self, fsp, fcp): - fcat = torch.cat([fsp, fcp], dim=1) - feat = self.convblk(fcat) - atten = F.avg_pool2d(feat, feat.size()[2:]) - atten = self.conv1(atten) - atten = self.relu(atten) - atten = self.conv2(atten) - atten = self.sigmoid(atten) - feat_atten = torch.mul(feat, atten) - feat_out = feat_atten + feat - return feat_out - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - - def get_params(self): - wd_params, nowd_params = [], [] - for name, module in self.named_modules(): - if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): - wd_params.append(module.weight) - if not module.bias is None: - nowd_params.append(module.bias) - elif isinstance(module, nn.BatchNorm2d): - nowd_params += list(module.parameters()) - return wd_params, nowd_params - - -class BiSeNet(nn.Module): - def __init__(self, n_classes, *args, **kwargs): - super(BiSeNet, self).__init__() - self.cp = ContextPath() - ## here self.sp 
is deleted - self.ffm = FeatureFusionModule(256, 256) - self.conv_out = BiSeNetOutput(256, 256, n_classes) - self.conv_out16 = BiSeNetOutput(128, 64, n_classes) - self.conv_out32 = BiSeNetOutput(128, 64, n_classes) - self.init_weight() - - def forward(self, x): - H, W = x.size()[2:] - feat_res8, feat_cp8, feat_cp16 = self.cp(x) # here return res3b1 feature - feat_sp = feat_res8 # use res3b1 feature to replace spatial path feature - feat_fuse = self.ffm(feat_sp, feat_cp8) - - feat_out = self.conv_out(feat_fuse) - feat_out16 = self.conv_out16(feat_cp8) - feat_out32 = self.conv_out32(feat_cp16) - - feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True) - feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True) - feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True) - return feat_out, feat_out16, feat_out32 - - def init_weight(self): - for ly in self.children(): - if isinstance(ly, nn.Conv2d): - nn.init.kaiming_normal_(ly.weight, a=1) - if not ly.bias is None: nn.init.constant_(ly.bias, 0) - - def get_params(self): - wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] - for name, child in self.named_children(): - child_wd_params, child_nowd_params = child.get_params() - if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput): - lr_mul_wd_params += child_wd_params - lr_mul_nowd_params += child_nowd_params - else: - wd_params += child_wd_params - nowd_params += child_nowd_params - return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params - - -if __name__ == "__main__": - net = BiSeNet(19) - net.cuda() - net.eval() - in_ten = torch.randn(16, 3, 640, 480).cuda() - out, out16, out32 = net(in_ten) - print(out.shape) - - net.get_params() diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh deleted file mode 100644 index 947342a0b7d8f50bcf4164b284ef3303a1247b64..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/decode_phone.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# decode into phones (and prepare a new data directory for HMM outputs) - -. ./path.sh - -set -eu - -out_dir= # same as in train.sh -dec_lmparam= # LM hyperparameters (e.g., 7.0.0) -dec_exp= -dec_script= -dec_splits="train valid" -dec_data_dir=$out_dir/dec_data # where to write HMM output - -data_dir=${out_dir}/data - -local/decode.sh --nj 40 --graph_name graph \ - --val_sets "$dec_splits" --decode_script $dec_script \ - $out_dir/exp/$dec_exp $data_dir $data_dir/lang_test - -if [ ! 
-z $dec_lmparam ]; then - for x in $dec_splits; do - mkdir -p $dec_data_dir/$x - cp $data_dir/$x/{feats.scp,cmvn.scp,utt2spk,spk2utt} $dec_data_dir/$x/ - - tra=$out_dir/exp/$dec_exp/decode_${x}/scoring/${dec_lmparam}.tra - cat $tra | utils/int2sym.pl -f 2- $data_dir/lang/words.txt | \ - sed 's:::g' | sed 's:::g' > $dec_data_dir/${x}/text - utils/fix_data_dir.sh $dec_data_dir/${x} - echo "WER on ${x} is" $(compute-wer ark:$data_dir/${x}_gt/text ark:$dec_data_dir/$x/text | cut -d" " -f2-) - done -fi diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py deleted file mode 100644 index 711ed03483f4089dbe91964a89021b49eeffbedc..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import dynamicconv_cuda -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.incremental_decoding_utils import with_incremental_state -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.unfold import unfold1d -from torch import nn -from torch.autograd import Function - - -class dynamicconvFunction(Function): - @staticmethod - def forward(ctx, x, weights, padding_l): - ctx.padding_l = padding_l - outputs = dynamicconv_cuda.forward(x, weights, padding_l) - variables = [x, weights] - ctx.save_for_backward(*variables) - return outputs[0] - - @staticmethod - def backward(ctx, grad_output): - outputs = dynamicconv_cuda.backward( - grad_output.contiguous(), ctx.padding_l, *ctx.saved_tensors - ) - grad_input, grad_weights = outputs - return grad_input, grad_weights, None - - -@with_incremental_state -class DynamicconvLayer(nn.Module): - def __init__( - self, - input_size, - kernel_size=1, - padding_l=None, - weight_softmax=False, - num_heads=1, - weight_dropout=0.0, - bias=False, - renorm_padding=False, - conv_bias=False, - query_size=None, - ): - - super(DynamicconvLayer, self).__init__() - self.input_size = input_size - self.query_size = input_size if query_size is None else query_size - self.kernel_size = kernel_size - self.padding_l = padding_l - self.num_heads = num_heads - self.weight_softmax = weight_softmax - self.weight_dropout_module = FairseqDropout( - weight_dropout, module_name=self.__class__.__name__ - ) - self.renorm_padding = renorm_padding - self.bias = bias - - self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias) - if conv_bias: - self.conv_bias = nn.Parameter(torch.Tensor(input_size)) - else: - self.conv_bias = None - self.reset_parameters() - - def reset_parameters(self): - nn.init.xavier_uniform_(self.weight_linear.weight) - if self.conv_bias is not None: - nn.init.constant_(self.conv_bias, 0.0) - nn.init.constant_(self.weight_linaer.bias, 0.0) - - def forward(self, x, incremental_state=None, query=None, unfold=None): - - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - # R = C // H - - # during inference time, incremental BMM is faster - if incremental_state is not None: - unfold = ( - x.size(0) > 512 if unfold is None else unfold - ) # use unfold mode as default for long sequence to save memory - unfold = unfold or (incremental_state is not None) - assert query is None - - if query is 
None: - query = x - if unfold: - output = self._forward_unfolded(x, incremental_state, query) - else: - output = self._forward_expanded(x, incremental_state, query) - - if self.conv_bias is not None: - output = output + self.conv_bias.view(1, 1, -1) - - return output - - # during training time, use CUDA kernel - else: - weight = self.weight_linear(x).view(T, B, H, K) - if self.weight_softmax: - weight = F.softmax(weight, dim=-1) - if self.weight_dropout_module.p: - weight = self.weight_dropout_module(weight) - - weight = weight.permute(1, 2, 3, 0).contiguous() - self.filters = weight - x = x.permute(1, 2, 0).contiguous() - output = dynamicconvFunction.apply(x, weight, self.padding_l).permute( - 2, 0, 1 - ) - if self.conv_bias is not None: - output = output + self.conv_bias.view(1, 1, -1) - return output - - def reorder_incremental_state(self, incremental_state, new_order): - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is not None: - input_buffer = input_buffer.index_select(1, new_order) - self._set_input_buffer(incremental_state, input_buffer) - - def _get_input_buffer(self, incremental_state): - return utils.get_incremental_state(self, incremental_state, "input_buffer") - - def _set_input_buffer(self, incremental_state, new_buffer): - return utils.set_incremental_state( - self, incremental_state, "input_buffer", new_buffer - ) - - def _forward_unfolded(self, x, incremental_state, query): - """The conventional implementation of convolutions. - Unfolding the input by having a window shifting to the right.""" - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - R = C // H - assert R * H == C == self.input_size - - weight = self.weight_linear(query).view(T * B * H, -1) - - # renorm_padding is only implemented in _forward_expanded - assert not self.renorm_padding or incremental_state is not None - - if incremental_state is not None: - input_buffer = self._get_input_buffer(incremental_state) - if input_buffer is None: - input_buffer = x.new() - x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3) - if self.kernel_size > 1: - self._set_input_buffer( - incremental_state, x_unfold[:, :, :, -self.kernel_size + 1 :] - ) - x_unfold = x_unfold.view(T * B * H, R, -1) - else: - padding_l = self.padding_l - if K > T and padding_l == K - 1: - weight = weight.narrow(1, K - T, T) - K, padding_l = T, T - 1 - # unfold the input: T x B x C --> T' x B x C x K - x_unfold = unfold1d(x, K, padding_l, 0) - x_unfold = x_unfold.view(T * B * H, R, K) - - if self.weight_softmax and not self.renorm_padding: - weight = F.softmax(weight, dim=1) - weight = weight.narrow(1, 0, K) - - if incremental_state is not None: - weight = weight[:, -x_unfold.size(2) :] - K = weight.size(1) - - if self.weight_softmax and self.renorm_padding: - weight = F.softmax(weight, dim=1) - - weight = self.weight_dropout_module(weight, inplace=False) - - output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1 - output = output.view(T, B, C) - return output - - def _forward_expanded(self, x, incremental_stat, query): - """Turn the convolution filters into band matrices and do matrix multiplication. - This is faster when the sequence is short, but less memory efficient. - This is not used in the decoder during inference. 
- """ - T, B, C = x.size() - K, H = self.kernel_size, self.num_heads - R = C // H - assert R * H == C == self.input_size - weight = self.weight_linear(query).view(T * B * H, -1) - - if not self.renorm_padding: - if self.weight_softmax: - weight = F.softmax(weight, dim=1) - weight = self.weight_dropout_module(weight, inplace=False) - weight = weight.narrow(1, 0, K).contiguous() - weight = weight.view(T, B * H, K).transpose(0, 1) - - x = x.view(T, B * H, R).transpose(0, 1) - if self.weight_softmax and self.renorm_padding: - # turn the convolution filters into band matrices - weight_expanded = weight.new(B * H, T, T + K - 1).fill_(float("-inf")) - weight_expanded.as_strided( - (B * H, T, K), (T * (T + K - 1), T + K, 1) - ).copy_(weight) - weight_expanded = weight_expanded.narrow(2, self.padding_l, T) - # normalize the weight over valid positions like self-attention - weight_expanded = F.softmax(weight_expanded, dim=2) - weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False) - else: - P = self.padding_l - # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length - if K > T and P == K - 1: - weight = weight.narrow(2, K - T, T) - K, P = T, T - 1 - # turn the convolution filters into band matrices - weight_expanded = weight.new_zeros(B * H, T, T + K - 1, requires_grad=False) - weight_expanded.as_strided( - (B * H, T, K), (T * (T + K - 1), T + K, 1) - ).copy_(weight) - weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T - output = torch.bmm(weight_expanded, x) - output = output.transpose(0, 1).contiguous().view(T, B, C) - return output diff --git a/spaces/mthsk/sovits-100orangejuice/vdecoder/hifigan/env.py b/spaces/mthsk/sovits-100orangejuice/vdecoder/hifigan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/mthsk/sovits-100orangejuice/vdecoder/hifigan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/mthsk/sovits-models-misc/modules/crepe.py b/spaces/mthsk/sovits-models-misc/modules/crepe.py deleted file mode 100644 index 0bff0e3474de6483290b56993f9b845e91ef9702..0000000000000000000000000000000000000000 --- a/spaces/mthsk/sovits-models-misc/modules/crepe.py +++ /dev/null @@ -1,327 +0,0 @@ -from typing import Optional,Union -try: - from typing import Literal -except Exception as e: - from typing_extensions import Literal -import numpy as np -import torch -import torchcrepe -from torch import nn -from torch.nn import functional as F -import scipy - -#from:https://github.com/fishaudio/fish-diffusion - -def repeat_expand( - content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest" -): - """Repeat content to target length. - This is a wrapper of torch.nn.functional.interpolate. - - Args: - content (torch.Tensor): tensor - target_len (int): target length - mode (str, optional): interpolation mode. Defaults to "nearest". 
- - Returns: - torch.Tensor: tensor - """ - - ndim = content.ndim - - if content.ndim == 1: - content = content[None, None] - elif content.ndim == 2: - content = content[None] - - assert content.ndim == 3 - - is_np = isinstance(content, np.ndarray) - if is_np: - content = torch.from_numpy(content) - - results = torch.nn.functional.interpolate(content, size=target_len, mode=mode) - - if is_np: - results = results.numpy() - - if ndim == 1: - return results[0, 0] - elif ndim == 2: - return results[0] - - -class BasePitchExtractor: - def __init__( - self, - hop_length: int = 512, - f0_min: float = 50.0, - f0_max: float = 1100.0, - keep_zeros: bool = True, - ): - """Base pitch extractor. - - Args: - hop_length (int, optional): Hop length. Defaults to 512. - f0_min (float, optional): Minimum f0. Defaults to 50.0. - f0_max (float, optional): Maximum f0. Defaults to 1100.0. - keep_zeros (bool, optional): Whether keep zeros in pitch. Defaults to True. - """ - - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.keep_zeros = keep_zeros - - def __call__(self, x, sampling_rate=44100, pad_to=None): - raise NotImplementedError("BasePitchExtractor is not callable.") - - def post_process(self, x, sampling_rate, f0, pad_to): - if isinstance(f0, np.ndarray): - f0 = torch.from_numpy(f0).float().to(x.device) - - if pad_to is None: - return f0 - - f0 = repeat_expand(f0, pad_to) - - if self.keep_zeros: - return f0 - - vuv_vector = torch.zeros_like(f0) - vuv_vector[f0 > 0.0] = 1.0 - vuv_vector[f0 <= 0.0] = 0.0 - - # 去掉0频率, 并线性插值 - nzindex = torch.nonzero(f0).squeeze() - f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy() - time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy() - time_frame = np.arange(pad_to) * self.hop_length / sampling_rate - - if f0.shape[0] <= 0: - return torch.zeros(pad_to, dtype=torch.float, device=x.device),torch.zeros(pad_to, dtype=torch.float, device=x.device) - - if f0.shape[0] == 1: - return torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[0],torch.ones(pad_to, dtype=torch.float, device=x.device) - - # 大概可以用 torch 重写? - f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1]) - vuv_vector = vuv_vector.cpu().numpy() - vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector,pad_to/len(vuv_vector),order = 0)) - - return f0,vuv_vector - - -class MaskedAvgPool1d(nn.Module): - def __init__( - self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0 - ): - """An implementation of mean pooling that supports masked values. - - Args: - kernel_size (int): The size of the median pooling window. - stride (int, optional): The stride of the median pooling window. Defaults to None. - padding (int, optional): The padding of the median pooling window. Defaults to 0. 
- """ - - super(MaskedAvgPool1d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride or kernel_size - self.padding = padding - - def forward(self, x, mask=None): - ndim = x.dim() - if ndim == 2: - x = x.unsqueeze(1) - - assert ( - x.dim() == 3 - ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)" - - # Apply the mask by setting masked elements to zero, or make NaNs zero - if mask is None: - mask = ~torch.isnan(x) - - # Ensure mask has the same shape as the input tensor - assert x.shape == mask.shape, "Input tensor and mask must have the same shape" - - masked_x = torch.where(mask, x, torch.zeros_like(x)) - # Create a ones kernel with the same number of channels as the input tensor - ones_kernel = torch.ones(x.size(1), 1, self.kernel_size, device=x.device) - - # Perform sum pooling - sum_pooled = nn.functional.conv1d( - masked_x, - ones_kernel, - stride=self.stride, - padding=self.padding, - groups=x.size(1), - ) - - # Count the non-masked (valid) elements in each pooling window - valid_count = nn.functional.conv1d( - mask.float(), - ones_kernel, - stride=self.stride, - padding=self.padding, - groups=x.size(1), - ) - valid_count = valid_count.clamp(min=1) # Avoid division by zero - - # Perform masked average pooling - avg_pooled = sum_pooled / valid_count - - # Fill zero values with NaNs - avg_pooled[avg_pooled == 0] = float("nan") - - if ndim == 2: - return avg_pooled.squeeze(1) - - return avg_pooled - - -class MaskedMedianPool1d(nn.Module): - def __init__( - self, kernel_size: int, stride: Optional[int] = None, padding: Optional[int] = 0 - ): - """An implementation of median pooling that supports masked values. - - This implementation is inspired by the median pooling implementation in - https://gist.github.com/rwightman/f2d3849281624be7c0f11c85c87c1598 - - Args: - kernel_size (int): The size of the median pooling window. - stride (int, optional): The stride of the median pooling window. Defaults to None. - padding (int, optional): The padding of the median pooling window. Defaults to 0. 
- """ - - super(MaskedMedianPool1d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride or kernel_size - self.padding = padding - - def forward(self, x, mask=None): - ndim = x.dim() - if ndim == 2: - x = x.unsqueeze(1) - - assert ( - x.dim() == 3 - ), "Input tensor must have 2 or 3 dimensions (batch_size, channels, width)" - - if mask is None: - mask = ~torch.isnan(x) - - assert x.shape == mask.shape, "Input tensor and mask must have the same shape" - - masked_x = torch.where(mask, x, torch.zeros_like(x)) - - x = F.pad(masked_x, (self.padding, self.padding), mode="reflect") - mask = F.pad( - mask.float(), (self.padding, self.padding), mode="constant", value=0 - ) - - x = x.unfold(2, self.kernel_size, self.stride) - mask = mask.unfold(2, self.kernel_size, self.stride) - - x = x.contiguous().view(x.size()[:3] + (-1,)) - mask = mask.contiguous().view(mask.size()[:3] + (-1,)).to(x.device) - - # Combine the mask with the input tensor - #x_masked = torch.where(mask.bool(), x, torch.fill_(torch.zeros_like(x),float("inf"))) - x_masked = torch.where(mask.bool(), x, torch.FloatTensor([float("inf")]).to(x.device)) - - # Sort the masked tensor along the last dimension - x_sorted, _ = torch.sort(x_masked, dim=-1) - - # Compute the count of non-masked (valid) values - valid_count = mask.sum(dim=-1) - - # Calculate the index of the median value for each pooling window - median_idx = (torch.div((valid_count - 1), 2, rounding_mode='trunc')).clamp(min=0) - - # Gather the median values using the calculated indices - median_pooled = x_sorted.gather(-1, median_idx.unsqueeze(-1).long()).squeeze(-1) - - # Fill infinite values with NaNs - median_pooled[torch.isinf(median_pooled)] = float("nan") - - if ndim == 2: - return median_pooled.squeeze(1) - - return median_pooled - - -class CrepePitchExtractor(BasePitchExtractor): - def __init__( - self, - hop_length: int = 512, - f0_min: float = 50.0, - f0_max: float = 1100.0, - threshold: float = 0.05, - keep_zeros: bool = False, - device = None, - model: Literal["full", "tiny"] = "full", - use_fast_filters: bool = True, - ): - super().__init__(hop_length, f0_min, f0_max, keep_zeros) - - self.threshold = threshold - self.model = model - self.use_fast_filters = use_fast_filters - self.hop_length = hop_length - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - if self.use_fast_filters: - self.median_filter = MaskedMedianPool1d(3, 1, 1).to(device) - self.mean_filter = MaskedAvgPool1d(3, 1, 1).to(device) - - def __call__(self, x, sampling_rate=44100, pad_to=None): - """Extract pitch using crepe. - - - Args: - x (torch.Tensor): Audio signal, shape (1, T). - sampling_rate (int, optional): Sampling rate. Defaults to 44100. - pad_to (int, optional): Pad to length. Defaults to None. - - Returns: - torch.Tensor: Pitch, shape (T // hop_length,). - """ - - assert x.ndim == 2, f"Expected 2D tensor, got {x.ndim}D tensor." - assert x.shape[0] == 1, f"Expected 1 channel, got {x.shape[0]} channels." 
- - x = x.to(self.dev) - f0, pd = torchcrepe.predict( - x, - sampling_rate, - self.hop_length, - self.f0_min, - self.f0_max, - pad=True, - model=self.model, - batch_size=1024, - device=x.device, - return_periodicity=True, - ) - - # Filter, remove silence, set uv threshold, refer to the original warehouse readme - if self.use_fast_filters: - pd = self.median_filter(pd) - else: - pd = torchcrepe.filter.median(pd, 3) - - pd = torchcrepe.threshold.Silence(-60.0)(pd, x, sampling_rate, 512) - f0 = torchcrepe.threshold.At(self.threshold)(f0, pd) - - if self.use_fast_filters: - f0 = self.mean_filter(f0) - else: - f0 = torchcrepe.filter.mean(f0, 3) - - f0 = torch.where(torch.isnan(f0), torch.full_like(f0, 0), f0)[0] - - return self.post_process(x, sampling_rate, f0, pad_to) diff --git a/spaces/munichnlp/README/README.md b/spaces/munichnlp/README/README.md deleted file mode 100644 index a79442e7ee95ed07c1d6254d7d07df0b060dd0ce..0000000000000000000000000000000000000000 --- a/spaces/munichnlp/README/README.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: README -emoji: 🥨 -colorFrom: blue -colorTo: yellow -sdk: static -pinned: false ---- - -
Hallo und Herzlich Willkommen to the Munich NLP community! 🤗

- -

Language is at the heart of human intelligence. It therefore is, and must be, at the heart of our efforts to build artificial intelligence. We founded this community to discuss the latest developments and to stimulate exchange on research and innovation around NLP. -Join the Munich NLP Discord community to learn and exchange about Natural Language Processing, Machine Learning, AI, and related topics. -We plan to run (hybrid) meetups, including technical talks, knowledge-sharing sessions and networking events. -If you would like to give a talk or host one of our events, please reach out to the organizers! -We plan to record events, and you can watch them on our YouTube channel. -We are looking for sponsors; if interested, drop us a line at munichnlp@gmail.com or DM one of the moderators.

- -

Connect with Us:

-
      -BgFaZgZ38N diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/bin/filter_sharded_dataset.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/bin/filter_sharded_dataset.py deleted file mode 100644 index b3c2b490e88bb3b55c6bb717e08f97f7a396d5fa..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/bin/filter_sharded_dataset.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 - - -import math -import os -import random - -import braceexpand -import webdataset as wds - -DEFAULT_CATS_FILE = os.path.join(os.path.dirname(__file__), '..', 'configs', 'places2-categories_157.txt') - -def is_good_key(key, cats): - return any(c in key for c in cats) - - -def main(args): - if args.categories == 'nofilter': - good_categories = None - else: - with open(args.categories, 'r') as f: - good_categories = set(line.strip().split(' ')[0] for line in f if line.strip()) - - all_input_files = list(braceexpand.braceexpand(args.infile)) - chunk_size = int(math.ceil(len(all_input_files) / args.n_read_streams)) - - input_iterators = [iter(wds.Dataset(all_input_files[start : start + chunk_size]).shuffle(args.shuffle_buffer)) - for start in range(0, len(all_input_files), chunk_size)] - output_datasets = [wds.ShardWriter(args.outpattern.format(i)) for i in range(args.n_write_streams)] - - good_readers = list(range(len(input_iterators))) - step_i = 0 - good_samples = 0 - bad_samples = 0 - while len(good_readers) > 0: - if step_i % args.print_freq == 0: - print(f'Iterations done {step_i}; readers alive {good_readers}; good samples {good_samples}; bad samples {bad_samples}') - - step_i += 1 - - ri = random.choice(good_readers) - try: - sample = next(input_iterators[ri]) - except StopIteration: - good_readers = list(set(good_readers) - {ri}) - continue - - if good_categories is not None and not is_good_key(sample['__key__'], good_categories): - bad_samples += 1 - continue - - wi = random.randint(0, args.n_write_streams - 1) - output_datasets[wi].write(sample) - good_samples += 1 - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('--categories', type=str, default=DEFAULT_CATS_FILE) - aparser.add_argument('--shuffle-buffer', type=int, default=10000) - aparser.add_argument('--n-read-streams', type=int, default=10) - aparser.add_argument('--n-write-streams', type=int, default=10) - aparser.add_argument('--print-freq', type=int, default=1000) - aparser.add_argument('infile', type=str) - aparser.add_argument('outpattern', type=str) - - main(aparser.parse_args()) diff --git a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/loggers/wandb/sweep.py b/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/loggers/wandb/sweep.py deleted file mode 100644 index 206059bc30bff425fd3a7b2ee83a40a642a8e8c6..0000000000000000000000000000000000000000 --- a/spaces/nakamura196/yolov5-kunshujo/ultralytics/yolov5/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent - hyp_dict = vars(wandb.config).get("_items") - - # Workaround: get necessary opt args - opt = 
parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/spaces/nakas/MusicGenDemucs/tests/models/test_musicgen.py b/spaces/nakas/MusicGenDemucs/tests/models/test_musicgen.py deleted file mode 100644 index d43cf73763f6c690ab0b277227ac225b286fa143..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) - return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/nateraw/jupyterlab-test2/update_config.py b/spaces/nateraw/jupyterlab-test2/update_config.py deleted file mode 100644 index 7b5a8b6b825332670971e9aea465e3ab4ecb728a..0000000000000000000000000000000000000000 --- a/spaces/nateraw/jupyterlab-test2/update_config.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Here, we override config to have num_workers=0 because -of a limitation in HF Spaces Docker /dev/shm. 
-""" - -import json -from pathlib import Path - - -def main(config_file="configs/44k/config.json"): - config_path = Path(config_file) - data = json.loads(config_path.read_text()) - data['train']['batch_size'] = 16 - data['train']['eval_interval'] = 800 - data['train']['num_workers'] = 0 - data['train']['persistent_workers'] = False - data['train']['push_to_hub'] = True - data['train']['repo_id'] = tuple(data['spk'])[0] - data['train']['private'] = True - config_path.write_text(json.dumps(data, indent=2, sort_keys=False)) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cam350 10 8 Torrent.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cam350 10 8 Torrent.md deleted file mode 100644 index 83bbb0a2ac925de77a06dbb3d80ab85646527677..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Cam350 10 8 Torrent.md +++ /dev/null @@ -1,20 +0,0 @@ -
-

How to Download and Install CAM350 10.8 for PCB Design

-

CAM350 is a powerful software package for creating, testing and verifying PCB designs. It is widely used by engineers and PCB fabricators to streamline the transition of engineering data to fabrication. CAM350 supports the Windows XP/XP Professional/Vista/7/8/10/11 operating systems and can be purchased from the developer's website[^1^].

-

However, some users may want to try CAM350 before buying it, or to use it for personal projects without paying the license fee. In that case, they may look for a torrent file that contains a cracked version of CAM350 10.8, which is the latest version available as of April 2023. A torrent file is a small file that contains metadata about the location and availability of a larger file, which can then be downloaded over a peer-to-peer network.
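The "small file" mentioned above has a well-defined structure: .torrent files store their metadata (tracker URL, payload name, piece size, piece checksums) in a simple encoding called bencoding. As a rough illustration, here is a minimal bencode decoder sketch in Python; it is not from the original article, the file name `example.torrent` is a placeholder, and a real client would use a hardened parser.

```python
# Minimal bencode decoder sketch: enough to inspect the metadata dictionary
# of a .torrent file (announce URL, payload name, piece length). Illustrative
# only; real clients use a hardened parser and also validate piece hashes.

def decode(data: bytes, i: int = 0):
    """Decode one bencoded value starting at index i; return (value, next_index)."""
    c = data[i:i + 1]
    if c == b"i":                                # integer: i<digits>e
        end = data.index(b"e", i)
        return int(data[i + 1:end]), end + 1
    if c == b"l":                                # list: l<items>e
        i += 1
        items = []
        while data[i:i + 1] != b"e":
            item, i = decode(data, i)
            items.append(item)
        return items, i + 1
    if c == b"d":                                # dict: d<key><value>...e
        i += 1
        result = {}
        while data[i:i + 1] != b"e":
            key, i = decode(data, i)
            value, i = decode(data, i)
            result[key] = value
        return result, i + 1
    colon = data.index(b":", i)                  # string: <length>:<bytes>
    length = int(data[i:colon])
    start = colon + 1
    return data[start:start + length], start + length


with open("example.torrent", "rb") as f:         # hypothetical file name
    meta, _ = decode(f.read())

print(meta[b"announce"])                         # tracker URL
print(meta[b"info"][b"name"])                    # suggested file/folder name
print(meta[b"info"].get(b"piece length"))        # bytes per piece
```

A real client would additionally verify each downloaded piece against the SHA-1 digests stored under the info dictionary's pieces key before writing it to disk.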

-

Cam350 10 8 Torrent

DOWNLOAD: https://urlcod.com/2uI9yp

-

While downloading and installing CAM350 10.8 from a torrent may seem tempting, it is not recommended, for several reasons. First, it is illegal and violates the intellectual property rights of the developer. Second, it may expose your computer to viruses, malware or spyware that can harm your system or steal your data. Third, it may not work properly, or may have some features disabled or corrupted. Fourth, it may not be compatible with your hardware or software configuration, or may cause conflicts with other programs.

-

Therefore, if you want to use CAM350 10.8 for PCB design, you should download it from the official website[^1^] and pay the license fee. This way, you will get a safe, reliable and up-to-date version of the software that can help you create high-quality PCB designs. You will also get technical support and customer service from the developer in case you encounter any issues or have any questions.

-

If you still want to download and install CAM350 10.8 from a torrent, you should do so at your own risk and responsibility. Here are some steps that you can follow:

-
1. Search for "Cam350 10 8 Torrent" on a torrent search engine such as The Pirate Bay[^3^], Kickass Torrents[^4^] or Le Jardin de Merveille[^5^]. You may need to use a VPN or proxy service to access these websites, as they may be blocked by your ISP or government.
2. Select a torrent file that has a high number of seeders (people who have the complete file and are sharing it) and leechers (people who are downloading the file). This will ensure faster download speeds and better availability of the file.
3. Download the torrent file to your computer and open it with a torrent client such as uTorrent, BitTorrent or qBittorrent. These are programs that allow you to download files from other peers using the torrent protocol.
4. Wait for the download to complete. The file size may vary depending on the source, but it should be around 500 MB.
5. Extract the downloaded file using a program such as WinRAR, 7-Zip or PeaZip. These are programs that allow you to compress and decompress files in various formats.
6. Run the setup.exe file and follow the instructions on the screen to install CAM350 10.8 on your computer. You may need to disable your antivirus or firewall software temporarily, as it may flag the cracked version as a threat.
7. Enjoy using CAM350 10.8 for PCB design.
-

Note: This article is for informational purposes only and does not endorse or encourage piracy or illegal downloading of software. We are not responsible for any damages or consequences that may result from following these steps.

-
-
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/How To Install Pmdg 737 Ngx Liveries [BEST].md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/How To Install Pmdg 737 Ngx Liveries [BEST].md deleted file mode 100644 index b87d71db064de7ad33b13b560ed4a16c1cfd4764..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/How To Install Pmdg 737 Ngx Liveries [BEST].md +++ /dev/null @@ -1,27 +0,0 @@ -
      -

      How To Install Pmdg 737 Ngx Liveries: A Step-By-Step Guide

      -

      If you are a fan of flight simulation games, you might have heard of PMDG, a company that produces high-quality add-ons for popular simulators like Microsoft Flight Simulator and Prepar3D. One of their most popular products is the PMDG 737 NGX, a realistic simulation of the Boeing 737 Next Generation aircraft. The PMDG 737 NGX comes with a variety of liveries, which are the paint schemes and logos of different airlines that operate the aircraft. However, you might want to install more liveries to customize your flying experience or to match your favorite airline. In this article, we will show you how to install PMDG 737 NGX liveries from different sources, such as the official PMDG website, third-party websites, or YouTube videos.

      -


      How To Install PMDG 737 NGX Liveries From The Official PMDG Website

      -

      The easiest way to install PMDG 737 NGX liveries is to use the official PMDG website, which offers a large selection of free liveries for download. Here are the steps to follow:

      -
1. Go to https://pmdg.com/pmdg-737ngxu-base-package-for-prepar3d-v4-v5/ and scroll down to the "Livery Downloads" section.
2. Select the livery you want to download and click on the "Download" button.
3. Save the file to your computer and unzip it using a program like WinZip or WinRAR.
4. Open the folder that contains the unzipped files and look for a file named "PMDG_Livery_Manager.exe". Double-click on it to launch the PMDG Livery Manager.
5. In the PMDG Livery Manager, select "PMDG 737 NGX" from the drop-down menu at the top left corner.
6. Click on the "Select Livery to Install" button and browse to the folder where you saved the unzipped files. Select the file that ends with ".ptp" and click "Open".
7. The livery will be added to the list of available liveries in the PMDG Livery Manager. You can check or uncheck the box next to each livery to enable or disable it.
8. Click on the "Install Selected Liveries" button at the bottom right corner to install the selected liveries.
9. Close the PMDG Livery Manager and launch your flight simulator. You should be able to see the new liveries in the aircraft selection menu.
      -

      How To Install PMDG 737 NGX Liveries From Third-Party Websites

      -

      If you want more variety or custom liveries, you can also install PMDG 737 NGX liveries from third-party websites that offer user-made liveries for free or for a fee. Some of these websites are:

      -
- https://www.flightsim.to/: A website that offers thousands of liveries, mods, sceneries, and tools for Microsoft Flight Simulator 2020.
- https://library.avsim.net/: A website that offers hundreds of thousands of files for various flight simulators, including liveries, aircraft, sceneries, utilities, and more.
- https://www.flightsim.com/: A website that offers over 200,000 files for various flight simulators, including liveries, aircraft, sceneries, utilities, and more.
      -

The steps to install PMDG 737 NGX liveries from these websites are similar to those for the official PMDG website, except that you usually need to register an account and log in before you can download any files. Also, some downloads might not come with a ".ptp" file but with a plain folder of texture files, in which case you have to copy the textures into the aircraft's folder and register the livery in the simulator's configuration yourself.
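If you end up with such a texture-only package, the manual route is to copy the texture folder into the aircraft's directory and append an [fltsim.N] entry to its aircraft.cfg. The Python sketch below illustrates that idea; the paths, folder names and configuration keys are hypothetical placeholders rather than the exact layout of any particular simulator, so back up aircraft.cfg before trying anything like this.

```python
import shutil
from pathlib import Path

def install_livery_manually(texture_dir: Path, aircraft_dir: Path,
                            title: str, atc_id: str) -> None:
    """Copy a texture folder next to the aircraft and register it in aircraft.cfg."""
    # 1. Copy the downloaded texture folder (e.g. "texture.ABC") into place.
    shutil.copytree(texture_dir, aircraft_dir / texture_dir.name, dirs_exist_ok=True)

    # 2. Pick the next free [fltsim.N] index by counting the existing entries
    #    (assumes they are numbered contiguously from 0, which is the usual case).
    cfg_path = aircraft_dir / "aircraft.cfg"
    next_index = cfg_path.read_text(encoding="utf-8", errors="ignore").count("[fltsim.")

    # 3. Append a minimal livery entry; real entries usually carry more keys
    #    (ui_variation, description, ...), which differ from aircraft to aircraft.
    texture_suffix = texture_dir.name.split("texture.", 1)[-1]
    with cfg_path.open("a", encoding="utf-8") as cfg:
        cfg.write(f"\n[fltsim.{next_index}]\n"
                  f"title={title}\n"
                  f"texture={texture_suffix}\n"
                  f"atc_id={atc_id}\n")

# Hypothetical paths; adjust them to your own simulator and download folders.
install_livery_manually(
    Path(r"C:\Downloads\texture.ABC"),
    Path(r"C:\P3D\SimObjects\Airplanes\PMDG 737-800NGX"),
    title="Boeing 737-800NGX My Airline", atc_id="N737XX")
```

Whether this works depends entirely on how the add-on is packaged, which is why the ".ptp" route through the PMDG Livery Manager remains the safer default.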

      -
      -
      \ No newline at end of file diff --git a/spaces/nettsz/stabilityai-stable-diffusion-2/README.md b/spaces/nettsz/stabilityai-stable-diffusion-2/README.md deleted file mode 100644 index fea535695ca49dc3d5ec21bb84014ed942d6dc50..0000000000000000000000000000000000000000 --- a/spaces/nettsz/stabilityai-stable-diffusion-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 -emoji: 👀 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/niew/vits-uma-genshin-honka/attentions.py b/spaces/niew/vits-uma-genshin-honka/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/niew/vits-uma-genshin-honka/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, 
p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/TensorMask/tensormask/config.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/TensorMask/tensormask/config.py deleted file mode 100644 index cf62d7aea23a9bdf637c9dc80b810e2413c9c0ae..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/TensorMask/tensormask/config.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -from detectron2.config import CfgNode as CN - - -def add_tensormask_config(cfg): - """ - Add config for TensorMask. - """ - cfg.MODEL.TENSOR_MASK = CN() - - # Anchor parameters - cfg.MODEL.TENSOR_MASK.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6", "p7"] - - # Convolutions to use in the towers - cfg.MODEL.TENSOR_MASK.NUM_CONVS = 4 - - # Number of foreground classes. 
- cfg.MODEL.TENSOR_MASK.NUM_CLASSES = 80 - # Channel size for the classification tower - cfg.MODEL.TENSOR_MASK.CLS_CHANNELS = 256 - - cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST = 0.05 - # Only the top (1000 * #levels) candidate boxes across all levels are - # considered jointly during test (to improve speed) - cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST = 6000 - cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST = 0.5 - - # Box parameters - # Channel size for the box tower - cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS = 128 - # Weights on (dx, dy, dw, dh) - cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS = (1.5, 1.5, 0.75, 0.75) - - # Loss parameters - cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA = 3.0 - cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA = 0.3 - - # Mask parameters - # Channel size for the mask tower - cfg.MODEL.TENSOR_MASK.MASK_CHANNELS = 128 - # Mask loss weight - cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT = 2.0 - # weight on positive pixels within the mask - cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT = 1.5 - # Whether to predict in the aligned representation - cfg.MODEL.TENSOR_MASK.ALIGNED_ON = False - # Whether to use the bipyramid architecture - cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON = False diff --git a/spaces/niuzhiwei/stabilityai-stable-diffusion-2-1/app.py b/spaces/niuzhiwei/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/niuzhiwei/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/ntt123/vietnam-male-voice-wavegru-tts/wavegru_mod.cc b/spaces/ntt123/vietnam-male-voice-wavegru-tts/wavegru_mod.cc deleted file mode 100644 index 888f0455cfa0d8c7964771172251fd24b24bd571..0000000000000000000000000000000000000000 --- a/spaces/ntt123/vietnam-male-voice-wavegru-tts/wavegru_mod.cc +++ /dev/null @@ -1,150 +0,0 @@ -/* -WaveGRU: -> Embed > GRU > O1 > O2 > Sampling > ... 
-*/ - -#include -#include -#include - -#include -#include -#include - -#include "sparse_matmul/sparse_matmul.h" -namespace py = pybind11; -using namespace std; - -using fvec = std::vector; -using ivec = std::vector; -using fndarray = py::array_t; -using indarray = py::array_t; -using mat = csrblocksparse::CsrBlockSparseMatrix; -using vec = csrblocksparse::CacheAlignedVector; -using masked_mat = csrblocksparse::MaskedSparseMatrix; - -mat create_mat(int h, int w) { - auto m = masked_mat(w, h, 0.90, 4, 4, 0.0, true); - auto a = mat(m); - return a; -} - -struct WaveGRU { - int hidden_dim; - int repeat_factor; - mat m; - vec b; - vec z, r, hh, zrh; - vec fco1, fco2; - vec o1b, o2b; - vec t; - vec h; - vec logits; - mat o1, o2; - std::vector embed; - - WaveGRU(int hidden_dim, int repeat_factor) - : hidden_dim(hidden_dim), - repeat_factor(repeat_factor), - b(3*hidden_dim), - t(3*hidden_dim), - zrh(3*hidden_dim), - z(hidden_dim), - r(hidden_dim), - hh(hidden_dim), - fco1(hidden_dim), - fco2(256), - h(hidden_dim), - o1b(hidden_dim), - o2b(256), - logits(256) { - m = create_mat(hidden_dim, 3*hidden_dim); - o1 = create_mat(hidden_dim, hidden_dim); - o2 = create_mat(hidden_dim, 256); - embed = std::vector(); - for (int i = 0; i < 256; i++) { - embed.emplace_back(hidden_dim * 3); - embed[i].FillRandom(); - } - } - - void load_embed(fndarray embed_weights) { - auto a_embed = embed_weights.unchecked<2>(); - for (int i = 0; i < 256; i++) { - for (int j = 0; j < hidden_dim * 3; j++) embed[i][j] = a_embed(i, j); - } - } - - mat load_linear(vec& bias, fndarray w, indarray mask, fndarray b) { - auto w_ptr = static_cast(w.request().ptr); - auto mask_ptr = static_cast(mask.request().ptr); - auto rb = b.unchecked<1>(); - // load bias, scale by 1/4 - for (int i = 0; i < rb.shape(0); i++) bias[i] = rb(i) / 4; - // load weights - masked_mat mm(w.shape(0), w.shape(1), mask_ptr, w_ptr); - mat mmm(mm); - return mmm; - } - - void load_weights(fndarray m, indarray m_mask, fndarray b, - fndarray o1, indarray o1_mask, - fndarray o1b, fndarray o2, - indarray o2_mask, fndarray o2b) { - this->m = load_linear(this->b, m, m_mask, b); - this->o1 = load_linear(this->o1b, o1, o1_mask, o1b); - this->o2 = load_linear(this->o2b, o2, o2_mask, o2b); - } - - std::vector inference(fndarray ft, float temperature) { - auto rft = ft.unchecked<2>(); - int value = 127; - std::vector signal(rft.shape(0) * repeat_factor); - h.FillZero(); - for (int index = 0; index < signal.size(); index++) { - m.SpMM_bias(h, b, &zrh, false); - - for (int i = 0; i < 3 * hidden_dim; i++) t[i] = embed[value][i] + rft(index / repeat_factor, i); - for (int i = 0; i < hidden_dim; i++) { - z[i] = zrh[i] + t[i]; - r[i] = zrh[hidden_dim + i] + t[hidden_dim + i]; - } - - z.Sigmoid(); - r.Sigmoid(); - - for (int i = 0; i < hidden_dim; i++) { - hh[i] = zrh[hidden_dim * 2 + i] * r[i] + t[hidden_dim * 2 + i]; - } - hh.Tanh(); - for (int i = 0; i < hidden_dim; i++) { - h[i] = (1. 
- z[i]) * h[i] + z[i] * hh[i]; - } - o1.SpMM_bias(h, o1b, &fco1, true); - o2.SpMM_bias(fco1, o2b, &fco2, false); - // auto max_logit = fco2[0]; - // for (int i = 1; i <= 255; ++i) { - // max_logit = max(max_logit, fco2[i]); - // } - // float total = 0.0; - // for (int i = 0; i <= 255; ++i) { - // logits[i] = csrblocksparse::fast_exp(fco2[i] - max_logit); - // total += logits[i]; - // } - // for (int i = 0; i <= 255; ++i) { - // if (logits[i] < total / 1024.0) fco2[i] = -1e9; - // } - value = fco2.Sample(temperature); - signal[index] = value; - } - return signal; - } -}; - -PYBIND11_MODULE(wavegru_mod, m) { - py::class_(m, "WaveGRU") - .def(py::init()) - .def("load_embed", &WaveGRU::load_embed) - .def("load_weights", &WaveGRU::load_weights) - .def("inference", &WaveGRU::inference); -} diff --git a/spaces/odettecantswim/vits-models-genshin/text/mandarin.py b/spaces/odettecantswim/vits-models-genshin/text/mandarin.py deleted file mode 100644 index 162e1b912dabec4b448ccd3d00d56306f82ce076..0000000000000000000000000000000000000000 --- a/spaces/odettecantswim/vits-models-genshin/text/mandarin.py +++ /dev/null @@ -1,326 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), 
- ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), - ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return 
text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/metrics/ssim.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/metrics/ssim.py deleted file mode 100644 index 2a3a431813678cf5d02f0d0b8185712be16f9e24..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/metrics/ssim.py +++ /dev/null @@ -1,46 +0,0 @@ -import cv2 -import numpy as np - - -def calculate_ssim(img1, img2): - C1 = (0.01 * 255)**2 - C2 = (0.03 * 255)**2 - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - kernel = cv2.getGaussianKernel(11, 1.5) - window = np.outer(kernel, kernel.transpose()) - - mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid - mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] - mu1_sq = mu1**2 - mu2_sq = mu2**2 - mu1_mu2 = mu1 * mu2 - sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq - sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq - sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * - (sigma1_sq + sigma2_sq + C2)) - return ssim_map.mean() - - -def ssim(img1, img2): - '''calculate SSIM - the same outputs as MATLAB's - img1, img2: [0, 255] - ''' - if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') - if img1.ndim == 2: - return calculate_ssim(img1, img2) - elif img1.ndim == 3: - if img1.shape[2] == 3: - ssims = [] - for i in range(3): - ssims.append(calculate_ssim(img1[:, :, i], img2[:, :, i])) - return np.array(ssims).mean() - elif img1.shape[2] == 1: - return calculate_ssim(np.squeeze(img1), np.squeeze(img2)) - else: - raise ValueError('Wrong input image dimensions.') \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/models/lafc_single.py b/spaces/oguzakif/video-object-remover/FGT_codes/FGT/models/lafc_single.py deleted file mode 100644 index 50ec653d9e2ab306891f9520c2de46781827ccbc..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/FGT/models/lafc_single.py +++ /dev/null @@ -1,114 +0,0 @@ -import torch -import torch.nn.functional as F -import torch.nn as nn -import functools -from .BaseNetwork import BaseNetwork -from models.utils.reconstructionLayers import make_layer, ResidualBlock_noBN - - -class Model(nn.Module): - def __init__(self, config): - super(Model, self).__init__() - self.net = P3DNet(config['num_flows'], config['cnum'], config['in_channel'], config['PASSMASK'], - config['use_residual'], - config['resBlocks'], config['use_bias'], config['conv_type'], config['init_weights']) - - def forward(self, flows, masks, edges=None): - ret = self.net(flows, masks, edges) - return ret - - -class P3DNet(BaseNetwork): - def __init__(self, num_flows, num_feats, in_channels, passmask, use_residual, res_blocks, - use_bias, conv_type, init_weights): - super().__init__(conv_type) - self.passmask = passmask - self.encoder2 = nn.Sequential( - nn.ReplicationPad2d(2), - self.ConvBlock2d(in_channels, num_feats, kernel_size=5, stride=1, padding=0, bias=use_bias, norm=None), - 
self.ConvBlock2d(num_feats, num_feats * 2, kernel_size=3, stride=2, padding=1, bias=use_bias, norm=None) - ) - self.encoder4 = nn.Sequential( - self.ConvBlock2d(num_feats * 2, num_feats * 2, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None), - self.ConvBlock2d(num_feats * 2, num_feats * 4, kernel_size=3, stride=2, padding=1, bias=use_bias, norm=None) - ) - residualBlock = functools.partial(ResidualBlock_noBN, nf=num_feats * 4) - self.res_blocks = make_layer(residualBlock, res_blocks) - self.resNums = res_blocks - # dilation convolution to enlarge the receptive field - self.middle = nn.Sequential( - self.ConvBlock2d(num_feats * 4, num_feats * 4, kernel_size=3, stride=1, padding=8, bias=use_bias, - dilation=8, norm=None), - self.ConvBlock2d(num_feats * 4, num_feats * 4, kernel_size=3, stride=1, padding=4, bias=use_bias, - dilation=4, norm=None), - self.ConvBlock2d(num_feats * 4, num_feats * 4, kernel_size=3, stride=1, padding=2, bias=use_bias, - dilation=2, norm=None), - self.ConvBlock2d(num_feats * 4, num_feats * 4, kernel_size=3, stride=1, padding=1, bias=use_bias, - dilation=1, norm=None), - ) - self.decoder2 = nn.Sequential( - self.DeconvBlock2d(num_feats * 8, num_feats * 2, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None), - self.ConvBlock2d(num_feats * 2, num_feats * 2, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None), - self.ConvBlock2d(num_feats * 2, num_feats * 2, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None) - ) - self.decoder = nn.Sequential( - self.DeconvBlock2d(num_feats * 4, num_feats, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None), - self.ConvBlock2d(num_feats, num_feats // 2, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None), - self.ConvBlock2d(num_feats // 2, 2, kernel_size=3, stride=1, padding=1, bias=use_bias, - norm=None) - ) - self.edgeDetector = EdgeDetection(conv_type) - if init_weights: - self.init_weights() - - def forward(self, flows, masks, edges=None): - if self.passmask: - inputs = torch.cat((flows, masks), dim=1) - else: - inputs = flows - if edges is not None: - inputs = torch.cat((inputs, edges), dim=1) - e2 = self.encoder2(inputs) - e4 = self.encoder4(e2) - if self.resNums > 0: - e4_res = self.res_blocks(e4) - else: - e4_res = e4 - c_e4_filled = self.middle(e4_res) - c_e4 = torch.cat((c_e4_filled, e4), dim=1) - c_e2Post = self.decoder2(c_e4) - c_e2 = torch.cat((c_e2Post, e2), dim=1) - output = self.decoder(c_e2) - edge = self.edgeDetector(output) - return output, edge - - -class EdgeDetection(BaseNetwork): - def __init__(self, conv_type, in_channels=2, out_channels=1, mid_channels=16): - super(EdgeDetection, self).__init__(conv_type) - self.projection = self.ConvBlock2d(in_channels=in_channels, out_channels=mid_channels, kernel_size=3, stride=1, - padding=1, norm=None) - self.mid_layer_1 = self.ConvBlock2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=3, - stride=1, padding=1, norm=None) - self.mid_layer_2 = self.ConvBlock2d(in_channels=mid_channels, out_channels=mid_channels, kernel_size=3, - stride=1, padding=1, activation=None, norm=None) - self.l_relu = nn.LeakyReLU() - self.out_layer = self.ConvBlock2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, - activation=None, norm=None) - - def forward(self, flow): - flow = self.projection(flow) - edge = self.mid_layer_1(flow) - edge = self.mid_layer_2(edge) - edge = self.l_relu(flow + edge) - edge = self.out_layer(edge) - edge = torch.sigmoid(edge) - return edge - - diff --git 
a/spaces/ondrejbiza/isa/main.py b/spaces/ondrejbiza/isa/main.py deleted file mode 100644 index e7cd9d94bcc1229b0ae2efccf4ac4000fbfb03a6..0000000000000000000000000000000000000000 --- a/spaces/ondrejbiza/isa/main.py +++ /dev/null @@ -1,65 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Main file for running the model trainer.""" - -from absl import app -from absl import flags -from absl import logging - -from clu import platform -import jax -from ml_collections import config_flags - -import tensorflow as tf - - -from invariant_slot_attention.lib import trainer - -FLAGS = flags.FLAGS - -config_flags.DEFINE_config_file( - "config", None, "Config file.") -flags.DEFINE_string("workdir", None, "Work unit directory.") -flags.DEFINE_string("jax_backend_target", None, "JAX backend target to use.") -flags.mark_flags_as_required(["config", "workdir"]) - - -def main(argv): - del argv - - # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make - # it unavailable to JAX. - tf.config.experimental.set_visible_devices([], "GPU") - - if FLAGS.jax_backend_target: - logging.info("Using JAX backend target %s", FLAGS.jax_backend_target) - jax.config.update("jax_xla_backend", "tpu_driver") - jax.config.update("jax_backend_target", FLAGS.jax_backend_target) - - logging.info("JAX host: %d / %d", jax.host_id(), jax.host_count()) - logging.info("JAX devices: %r", jax.devices()) - - # Add a note so that we can tell which task is which JAX host. 
- platform.work_unit().set_task_status( - f"host_id: {jax.host_id()}, host_count: {jax.host_count()}") - platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY, - FLAGS.workdir, "workdir") - - trainer.train_and_evaluate(FLAGS.config, FLAGS.workdir) - - -if __name__ == "__main__": - app.run(main) diff --git a/spaces/open-spaced-repetition/fsrs4anki_previewer/README.md b/spaces/open-spaced-repetition/fsrs4anki_previewer/README.md deleted file mode 100644 index ed3a56e6ea39dc7a298578be8f468943cdaee1c2..0000000000000000000000000000000000000000 --- a/spaces/open-spaced-repetition/fsrs4anki_previewer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fsrs4anki Previewer -emoji: 👁 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/parvezalmuqtadir/stablediffusionapi-vector-art/README.md b/spaces/parvezalmuqtadir/stablediffusionapi-vector-art/README.md deleted file mode 100644 index cfa1a6537ae33fbb8f44274a0b14c980f23f901f..0000000000000000000000000000000000000000 --- a/spaces/parvezalmuqtadir/stablediffusionapi-vector-art/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stablediffusionapi Vector Art -emoji: 😻 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pkiage/time_series_autocorrelation_demo/src/__init__.py b/spaces/pkiage/time_series_autocorrelation_demo/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pknez/face-swap-docker/chain_img_processor/video.py b/spaces/pknez/face-swap-docker/chain_img_processor/video.py deleted file mode 100644 index 857aea1a99eab21676f10341f4ad03dcd7f29d8a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/chain_img_processor/video.py +++ /dev/null @@ -1,132 +0,0 @@ -import roop.globals - -from threading import Thread -from chain_img_processor import ChainImgProcessor - -class ThreadWithReturnValue(Thread): - - def __init__(self, group=None, target=None, name=None, - args=(), kwargs={}, Verbose=None): - Thread.__init__(self, group, target, name, args, kwargs) - self._return = None - - def run(self): - if self._target is not None: - self._return = self._target(*self._args, - **self._kwargs) - - def join(self, *args): - Thread.join(self, *args) - return self._return - - -# in beta -class ChainVideoProcessor(ChainImgProcessor): - def __init__(self): - ChainImgProcessor.__init__(self) - - self.video_save_codec = "libx264" - self.video_save_crf = 14 - - def init_with_plugins(self): - self.init_plugins(["core","core_video"]) - self.display_init_info() - - init_on_start_arr = self.init_on_start.split(",") - for proc_id in init_on_start_arr: - self.init_processor(proc_id) - - def run_video_chain(self, source_video, target_video, fps, threads:int = 1, chain = None, params_frame_gen_func = None, video_audio = None): - import cv2 - from tqdm import tqdm - from chain_img_processor.ffmpeg_writer import FFMPEG_VideoWriter # ffmpeg install needed - - cap = cv2.VideoCapture(source_video) - # width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - # height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - - # first frame do 
manually - because upscale may happen, we need to estimate width/height - ret, frame = cap.read() - if params_frame_gen_func is not None: - params = params_frame_gen_func(self, frame) - else: - params = {} - params["original_frame"] = frame - frame_processed, params = self.run_chain(frame,params,chain) - height, width, channels = frame_processed.shape - - self.fill_processors_for_thread_chains(threads,chain) - #print(self.processors_objects) - #import threading - #locks:list[threading.Lock] = [] - locks: list[bool] = [] - for i in range(threads): - #locks.append(threading.Lock()) - locks.append(False) - - temp = [] - with FFMPEG_VideoWriter(target_video, (width, height), fps, codec=roop.globals.video_encoder, crf=roop.globals.video_quality, audiofile=video_audio) as output_video_ff: - with tqdm(total=frame_count, desc='Processing', unit="frame", dynamic_ncols=True, - bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]') as progress: - - # do first frame - output_video_ff.write_frame(frame_processed) - progress.update(1) # - cnt_frames = 0 - - # do rest frames - while True: - # getting frame - ret, frame = cap.read() - - if not ret: - break - cnt_frames+=1 - thread_ind = cnt_frames % threads - # we are having an array of length %gpu_threads%, running in parallel - # so if array is equal or longer than gpu threads, waiting - #while len(temp) >= threads: - while locks[thread_ind]: - #print('WAIT', thread_ind) - # we are order dependent, so we are forced to wait for first element to finish. When finished removing thread from the list - frame_processed, params = temp.pop(0).join() - locks[params["_thread_index"]] = False - #print('OFF',cnt_frames,locks[params["_thread_index"]],locks) - # writing into output - output_video_ff.write_frame(frame_processed) - # updating the status - progress.update(1) - - # calc params for frame - if params_frame_gen_func is not None: - params = params_frame_gen_func(self,frame) - else: - params = {} - - # adding new frame to the list and starting it - locks[thread_ind] = True - #print('ON', cnt_frames, thread_ind, locks) - params["original_frame"] = frame - temp.append( - ThreadWithReturnValue(target=self.run_chain, args=(frame, params, chain, thread_ind))) - temp[-1].start() - - while len(temp) > 0: - # we are order dependent, so we are forced to wait for first element to finish. When finished removing thread from the list - frame_processed, params = temp.pop(0).join() - locks[params["_thread_index"]] = False - # writing into output - output_video_ff.write_frame(frame_processed) - - progress.update(1) - - #print("FINAL", locks) - -_video_processor:ChainVideoProcessor = None -def get_single_video_processor() -> ChainVideoProcessor: - global _video_processor - if _video_processor is None: - _video_processor = ChainVideoProcessor() - _video_processor.init_with_plugins() - return _video_processor diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tomli/_types.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tomli/_types.py deleted file mode 100644 index d949412e03b29d70592c7721fe747e5085c2e280..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tomli/_types.py +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. 
- -from typing import Any, Callable, Tuple - -# Type annotations -ParseFloat = Callable[[str], Any] -Key = Tuple[str, ...] -Pos = int diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_scripts.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_scripts.py deleted file mode 100644 index 8b3133f1fdfa67e58e4f944ecec432f42a4c048a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/command/install_scripts.py +++ /dev/null @@ -1,73 +0,0 @@ -from distutils import log -import distutils.command.install_scripts as orig -from distutils.errors import DistutilsModuleError -import os -import sys - -from .._path import ensure_directory - - -class install_scripts(orig.install_scripts): - """Do normal script install, plus any egg_info wrapper scripts""" - - def initialize_options(self): - orig.install_scripts.initialize_options(self) - self.no_ep = False - - def run(self): - self.run_command("egg_info") - if self.distribution.scripts: - orig.install_scripts.run(self) # run first to set up self.outfiles - else: - self.outfiles = [] - if self.no_ep: - # don't install entry point scripts into .egg file! - return - self._install_ep_scripts() - - def _install_ep_scripts(self): - # Delay import side-effects - from pkg_resources import Distribution, PathMetadata - from . import easy_install as ei - - ei_cmd = self.get_finalized_command("egg_info") - dist = Distribution( - ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), - ei_cmd.egg_name, ei_cmd.egg_version, - ) - bs_cmd = self.get_finalized_command('build_scripts') - exec_param = getattr(bs_cmd, 'executable', None) - try: - bw_cmd = self.get_finalized_command("bdist_wininst") - is_wininst = getattr(bw_cmd, '_is_running', False) - except (ImportError, DistutilsModuleError): - is_wininst = False - writer = ei.ScriptWriter - if is_wininst: - exec_param = "python.exe" - writer = ei.WindowsScriptWriter - if exec_param == sys.executable: - # In case the path to the Python executable contains a space, wrap - # it so it's not split up. - exec_param = [exec_param] - # resolve the writer to the environment - writer = writer.best() - cmd = writer.command_spec_class.best().from_param(exec_param) - for args in writer.get_args(dist, cmd.as_header()): - self.write_script(*args) - - def write_script(self, script_name, contents, mode="t", *ignored): - """Write an executable file to the scripts directory""" - from setuptools.command.easy_install import chmod, current_umask - - log.info("Installing %s script to %s", script_name, self.install_dir) - target = os.path.join(self.install_dir, script_name) - self.outfiles.append(target) - - mask = current_umask() - if not self.dry_run: - ensure_directory(target) - f = open(target, "w" + mode) - f.write(contents) - f.close() - chmod(target, 0o777 - mask) diff --git a/spaces/pplonski/mercury-hugging-face/welcome.md b/spaces/pplonski/mercury-hugging-face/welcome.md deleted file mode 100644 index 04510be8e1662d54db745c1ab67b6b773ef5d575..0000000000000000000000000000000000000000 --- a/spaces/pplonski/mercury-hugging-face/welcome.md +++ /dev/null @@ -1,27 +0,0 @@ - -# Welcome in Mercury 👋 - -Mercury framework allows you easily turn Jupyter Notebooks into shareble web applications. - -You can create beautiful and interactive web applications, reports, dashboards and presentations. 
- -Mercury features: -- add widgets with simple Python API, -- simple cell execution model - widgets trigger cell execution below the widget definition, -- hide or show notebook code, -- share multiple notebooks, -- executed notebook can be exported to HTML or PDF, -- embed notebook apps on any website, -- easily deploy (free & public Mercury cloud comming soon!) -- easily add authentication to notebooks (comming soon!) -- schedule automatic execution (comming soon!) - -Please check our documentation at RunMercury.com for more information 📚 - -This text can be edited by changing `welcome.md` file. Demo notebooks can be edited in Jupyter. - -All files created for demo are in the current directory. - -## Demo applications - - \ No newline at end of file diff --git a/spaces/prerna9811/Chord/portaudio/qa/paqa_latency.c b/spaces/prerna9811/Chord/portaudio/qa/paqa_latency.c deleted file mode 100644 index a70807b5119f4c9606528a3cefbb021e3ff58081..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/qa/paqa_latency.c +++ /dev/null @@ -1,482 +0,0 @@ -/** @file paqa_latency.c - @ingroup qa_src - @brief Test latency estimates. - @author Ross Bencina - @author Phil Burk -*/ -/* - * $Id: patest_sine.c 1368 2008-03-01 00:38:27Z rossb $ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com/ - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */ -#include -#include -#include "portaudio.h" -#include "loopback/src/qa_tools.h" - -#define NUM_SECONDS (5) -#define SAMPLE_RATE (44100) -#define FRAMES_PER_BUFFER (64) - -#ifndef M_PI -#define M_PI (3.14159265) -#endif - -#define TABLE_SIZE (200) -typedef struct -{ - float sine[TABLE_SIZE]; - int left_phase; - int right_phase; - char message[20]; - int minFramesPerBuffer; - int maxFramesPerBuffer; - int callbackCount; - PaTime minDeltaDacTime; - PaTime maxDeltaDacTime; - PaStreamCallbackTimeInfo previousTimeInfo; -} -paTestData; - -/* Used to tally the results of the QA tests. */ -int g_testsPassed = 0; -int g_testsFailed = 0; - -/* This routine will be called by the PortAudio engine when audio is needed. -** It may called at interrupt level on some machines so don't do anything -** that could mess up the system like calling malloc() or free(). -*/ -static int patestCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - paTestData *data = (paTestData*)userData; - float *out = (float*)outputBuffer; - unsigned long i; - - (void) timeInfo; /* Prevent unused variable warnings. */ - (void) statusFlags; - (void) inputBuffer; - - if( data->minFramesPerBuffer > framesPerBuffer ) - { - data->minFramesPerBuffer = framesPerBuffer; - } - if( data->maxFramesPerBuffer < framesPerBuffer ) - { - data->maxFramesPerBuffer = framesPerBuffer; - } - - /* Measure min and max output time stamp delta. */ - if( data->callbackCount > 0 ) - { - PaTime delta = timeInfo->outputBufferDacTime - data->previousTimeInfo.outputBufferDacTime; - if( data->minDeltaDacTime > delta ) - { - data->minDeltaDacTime = delta; - } - if( data->maxDeltaDacTime < delta ) - { - data->maxDeltaDacTime = delta; - } - } - data->previousTimeInfo = *timeInfo; - - for( i=0; isine[data->left_phase]; /* left */ - *out++ = data->sine[data->right_phase]; /* right */ - data->left_phase += 1; - if( data->left_phase >= TABLE_SIZE ) data->left_phase -= TABLE_SIZE; - data->right_phase += 3; /* higher pitch so we can distinguish left and right. 
*/ - if( data->right_phase >= TABLE_SIZE ) data->right_phase -= TABLE_SIZE; - } - - data->callbackCount += 1; - return paContinue; -} - -PaError paqaCheckLatency( PaStreamParameters *outputParamsPtr, - paTestData *dataPtr, double sampleRate, unsigned long framesPerBuffer ) -{ - PaError err; - PaStream *stream; - const PaStreamInfo* streamInfo; - - dataPtr->minFramesPerBuffer = 9999999; - dataPtr->maxFramesPerBuffer = 0; - dataPtr->minDeltaDacTime = 9999999.0; - dataPtr->maxDeltaDacTime = 0.0; - dataPtr->callbackCount = 0; - - printf("Stream parameter: suggestedOutputLatency = %g\n", outputParamsPtr->suggestedLatency ); - if( framesPerBuffer == paFramesPerBufferUnspecified ){ - printf("Stream parameter: user framesPerBuffer = paFramesPerBufferUnspecified\n" ); - }else{ - printf("Stream parameter: user framesPerBuffer = %lu\n", framesPerBuffer ); - } - err = Pa_OpenStream( - &stream, - NULL, /* no input */ - outputParamsPtr, - sampleRate, - framesPerBuffer, - paClipOff, /* we won't output out of range samples so don't bother clipping them */ - patestCallback, - dataPtr ); - if( err != paNoError ) goto error1; - - streamInfo = Pa_GetStreamInfo( stream ); - printf("Stream info: inputLatency = %g\n", streamInfo->inputLatency ); - printf("Stream info: outputLatency = %g\n", streamInfo->outputLatency ); - - err = Pa_StartStream( stream ); - if( err != paNoError ) goto error2; - - printf("Play for %d seconds.\n", NUM_SECONDS ); - Pa_Sleep( NUM_SECONDS * 1000 ); - - printf(" minFramesPerBuffer = %4d\n", dataPtr->minFramesPerBuffer ); - printf(" maxFramesPerBuffer = %4d\n", dataPtr->maxFramesPerBuffer ); - printf(" minDeltaDacTime = %f\n", dataPtr->minDeltaDacTime ); - printf(" maxDeltaDacTime = %f\n", dataPtr->maxDeltaDacTime ); - - err = Pa_StopStream( stream ); - if( err != paNoError ) goto error2; - - err = Pa_CloseStream( stream ); - Pa_Sleep( 1 * 1000 ); - - - printf("-------------------------------------\n"); - return err; -error2: - Pa_CloseStream( stream ); -error1: - printf("-------------------------------------\n"); - return err; -} - - -/*******************************************************************/ -static int paqaNoopCallback( const void *inputBuffer, void *outputBuffer, - unsigned long framesPerBuffer, - const PaStreamCallbackTimeInfo* timeInfo, - PaStreamCallbackFlags statusFlags, - void *userData ) -{ - (void)inputBuffer; - (void)outputBuffer; - (void)framesPerBuffer; - (void)timeInfo; - (void)statusFlags; - (void)userData; - return paContinue; -} - -/*******************************************************************/ -static int paqaCheckMultipleSuggested( PaDeviceIndex deviceIndex, int isInput ) -{ - int i; - int numLoops = 10; - PaError err; - PaStream *stream; - PaStreamParameters streamParameters; - const PaStreamInfo* streamInfo; - double lowLatency; - double highLatency; - double finalLatency; - double sampleRate = SAMPLE_RATE; - const PaDeviceInfo *pdi = Pa_GetDeviceInfo( deviceIndex ); - double previousLatency = 0.0; - int numChannels = 1; - float toleranceRatio = 1.0; - - printf("------------------------ paqaCheckMultipleSuggested - %s\n", - (isInput ? "INPUT" : "OUTPUT") ); - if( isInput ) - { - lowLatency = pdi->defaultLowInputLatency; - highLatency = pdi->defaultHighInputLatency; - numChannels = (pdi->maxInputChannels < 2) ? 1 : 2; - } - else - { - lowLatency = pdi->defaultLowOutputLatency; - highLatency = pdi->defaultHighOutputLatency; - numChannels = (pdi->maxOutputChannels < 2) ? 
1 : 2; - } - streamParameters.channelCount = numChannels; - streamParameters.device = deviceIndex; - streamParameters.hostApiSpecificStreamInfo = NULL; - streamParameters.sampleFormat = paFloat32; - sampleRate = pdi->defaultSampleRate; - - printf(" lowLatency = %g\n", lowLatency ); - printf(" highLatency = %g\n", highLatency ); - printf(" numChannels = %d\n", numChannels ); - printf(" sampleRate = %g\n", sampleRate ); - - if( (highLatency - lowLatency) < 0.001 ) - { - numLoops = 1; - } - - for( i=0; iinputLatency; - } - else - { - finalLatency = streamInfo->outputLatency; - } - printf(" finalLatency = %6.4f\n", finalLatency ); - /* For the default low & high latency values, expect quite close; for other requested - * values, at worst the next power-of-2 may result (eg 513 -> 1024) */ - toleranceRatio = ( (i == 0) || (i == ( numLoops - 1 )) ) ? 0.1 : 1.0; - QA_ASSERT_CLOSE( "final latency should be close to suggested latency", - streamParameters.suggestedLatency, finalLatency, (streamParameters.suggestedLatency * toleranceRatio) ); - if( i == 0 ) - { - previousLatency = finalLatency; - } - } - - if( numLoops > 1 ) - { - QA_ASSERT_TRUE( " final latency should increase with suggested latency", (finalLatency > previousLatency) ); - } - - return 0; -error: - return -1; -} - -/*******************************************************************/ -static int paqaVerifySuggestedLatency( void ) -{ - PaDeviceIndex id; - int result = 0; - const PaDeviceInfo *pdi; - int numDevices = Pa_GetDeviceCount(); - - printf("\n ------------------------ paqaVerifySuggestedLatency\n"); - for( id=0; idname, Pa_GetHostApiInfo(pdi->hostApi)->name); - if( pdi->maxOutputChannels > 0 ) - { - if( paqaCheckMultipleSuggested( id, 0 ) < 0 ) - { - printf("OUTPUT CHECK FAILED !!! #%d: '%s'\n", id, pdi->name); - result -= 1; - } - } - if( pdi->maxInputChannels > 0 ) - { - if( paqaCheckMultipleSuggested( id, 1 ) < 0 ) - { - printf("INPUT CHECK FAILED !!! 
#%d: '%s'\n", id, pdi->name); - result -= 1; - } - } - } - return result; -} - -/*******************************************************************/ -static int paqaVerifyDeviceInfoLatency( void ) -{ - PaDeviceIndex id; - const PaDeviceInfo *pdi; - int numDevices = Pa_GetDeviceCount(); - - printf("\n ------------------------ paqaVerifyDeviceInfoLatency\n"); - for( id=0; idname, Pa_GetHostApiInfo(pdi->hostApi)->name); - if( pdi->maxOutputChannels > 0 ) - { - printf(" Output defaultLowOutputLatency = %f seconds\n", pdi->defaultLowOutputLatency); - printf(" Output defaultHighOutputLatency = %f seconds\n", pdi->defaultHighOutputLatency); - QA_ASSERT_TRUE( "defaultLowOutputLatency should be > 0", (pdi->defaultLowOutputLatency > 0.0) ); - QA_ASSERT_TRUE( "defaultHighOutputLatency should be > 0", (pdi->defaultHighOutputLatency > 0.0) ); - QA_ASSERT_TRUE( "defaultHighOutputLatency should be >= Low", (pdi->defaultHighOutputLatency >= pdi->defaultLowOutputLatency) ); - } - if( pdi->maxInputChannels > 0 ) - { - printf(" Input defaultLowInputLatency = %f seconds\n", pdi->defaultLowInputLatency); - printf(" Input defaultHighInputLatency = %f seconds\n", pdi->defaultHighInputLatency); - QA_ASSERT_TRUE( "defaultLowInputLatency should be > 0", (pdi->defaultLowInputLatency > 0.0) ); - QA_ASSERT_TRUE( "defaultHighInputLatency should be > 0", (pdi->defaultHighInputLatency > 0.0) ); - QA_ASSERT_TRUE( "defaultHighInputLatency should be >= Low", (pdi->defaultHighInputLatency >= pdi->defaultLowInputLatency) ); - } - } - return 0; -error: - return -1; -} - - - -/*******************************************************************/ -int main(void); -int main(void) -{ - PaStreamParameters outputParameters; - PaError err; - paTestData data; - const PaDeviceInfo *deviceInfo; - int i; - int framesPerBuffer; - double sampleRate = SAMPLE_RATE; - - printf("\nPortAudio QA: investigate output latency.\n"); - - /* initialise sinusoidal wavetable */ - for( i=0; iname, Pa_GetHostApiInfo(deviceInfo->hostApi)->name); - printf("Device info: defaultLowOutputLatency = %f seconds\n", deviceInfo->defaultLowOutputLatency); - printf("Device info: defaultHighOutputLatency = %f seconds\n", deviceInfo->defaultHighOutputLatency); - sampleRate = deviceInfo->defaultSampleRate; - printf("Sample Rate for following tests: %g\n", sampleRate); - outputParameters.hostApiSpecificStreamInfo = NULL; - printf("-------------------------------------\n"); - - // Try to use a small buffer that is smaller than we think the device can handle. - // Try to force combining multiple user buffers into a host buffer. - printf("------------- Try a very small buffer.\n"); - framesPerBuffer = 9; - outputParameters.suggestedLatency = deviceInfo->defaultLowOutputLatency; - err = paqaCheckLatency( &outputParameters, &data, sampleRate, framesPerBuffer ); - if( err != paNoError ) goto error; - - printf("------------- 64 frame buffer with 1.1 * defaultLow latency.\n"); - framesPerBuffer = 64; - outputParameters.suggestedLatency = deviceInfo->defaultLowOutputLatency * 1.1; - err = paqaCheckLatency( &outputParameters, &data, sampleRate, framesPerBuffer ); - if( err != paNoError ) goto error; - - // Try to create a huge buffer that is bigger than the allowed device maximum. 
- printf("------------- Try a huge buffer.\n"); - framesPerBuffer = 16*1024; - outputParameters.suggestedLatency = ((double)framesPerBuffer) / sampleRate; // approximate - err = paqaCheckLatency( &outputParameters, &data, sampleRate, framesPerBuffer ); - if( err != paNoError ) goto error; - - printf("------------- Try suggestedLatency = 0.0\n"); - outputParameters.suggestedLatency = 0.0; - err = paqaCheckLatency( &outputParameters, &data, sampleRate, paFramesPerBufferUnspecified ); - if( err != paNoError ) goto error; - - printf("------------- Try suggestedLatency = defaultLowOutputLatency\n"); - outputParameters.suggestedLatency = deviceInfo->defaultLowOutputLatency; - err = paqaCheckLatency( &outputParameters, &data, sampleRate, paFramesPerBufferUnspecified ); - if( err != paNoError ) goto error; - - printf("------------- Try suggestedLatency = defaultHighOutputLatency\n"); - outputParameters.suggestedLatency = deviceInfo->defaultHighOutputLatency; - err = paqaCheckLatency( &outputParameters, &data, sampleRate, paFramesPerBufferUnspecified ); - if( err != paNoError ) goto error; - - printf("------------- Try suggestedLatency = defaultHighOutputLatency * 4\n"); - outputParameters.suggestedLatency = deviceInfo->defaultHighOutputLatency * 4; - err = paqaCheckLatency( &outputParameters, &data, sampleRate, paFramesPerBufferUnspecified ); - if( err != paNoError ) goto error; - - Pa_Terminate(); - printf("SUCCESS - test finished.\n"); - return err; - -error: - Pa_Terminate(); - fprintf( stderr, "ERROR - test failed.\n" ); - fprintf( stderr, "Error number: %d\n", err ); - fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); - return err; -} diff --git a/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/endpointvolume.h b/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/endpointvolume.h deleted file mode 100644 index 81155d7a92ad4ab0597888f88c542d1544187168..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/src/hostapi/wasapi/mingw-include/endpointvolume.h +++ /dev/null @@ -1,620 +0,0 @@ - - -/* this ALWAYS GENERATED file contains the definitions for the interfaces */ - - - /* File created by MIDL compiler version 7.00.0499 */ -/* Compiler settings for endpointvolume.idl: - Oicf, W1, Zp8, env=Win32 (32b run) - protocol : dce , ms_ext, c_ext, robust - error checks: allocation ref bounds_check enum stub_data - VC __declspec() decoration level: - __declspec(uuid()), __declspec(selectany), __declspec(novtable) - DECLSPEC_UUID(), MIDL_INTERFACE() -*/ -//@@MIDL_FILE_HEADING( ) - -#pragma warning( disable: 4049 ) /* more than 64k source lines */ - - -/* verify that the version is high enough to compile this file*/ -#ifndef __REQUIRED_RPCNDR_H_VERSION__ -#define __REQUIRED_RPCNDR_H_VERSION__ 500 -#endif - -/* verify that the version is high enough to compile this file*/ -#ifndef __REQUIRED_RPCSAL_H_VERSION__ -#define __REQUIRED_RPCSAL_H_VERSION__ 100 -#endif - -#include "rpc.h" -#include "rpcndr.h" - -#ifndef __RPCNDR_H_VERSION__ -#error this stub requires an updated version of -#endif // __RPCNDR_H_VERSION__ - -#ifndef COM_NO_WINDOWS_H -#include "windows.h" -#include "ole2.h" -#endif /*COM_NO_WINDOWS_H*/ - -#ifndef __endpointvolume_h__ -#define __endpointvolume_h__ - -#if defined(_MSC_VER) && (_MSC_VER >= 1020) -#pragma once -#endif - -/* Forward Declarations */ - -#ifndef __IAudioEndpointVolumeCallback_FWD_DEFINED__ -#define __IAudioEndpointVolumeCallback_FWD_DEFINED__ -typedef interface IAudioEndpointVolumeCallback 
IAudioEndpointVolumeCallback; -#endif /* __IAudioEndpointVolumeCallback_FWD_DEFINED__ */ - - -#ifndef __IAudioEndpointVolume_FWD_DEFINED__ -#define __IAudioEndpointVolume_FWD_DEFINED__ -typedef interface IAudioEndpointVolume IAudioEndpointVolume; -#endif /* __IAudioEndpointVolume_FWD_DEFINED__ */ - - -#ifndef __IAudioMeterInformation_FWD_DEFINED__ -#define __IAudioMeterInformation_FWD_DEFINED__ -typedef interface IAudioMeterInformation IAudioMeterInformation; -#endif /* __IAudioMeterInformation_FWD_DEFINED__ */ - - -/* header files for imported files */ -#include "unknwn.h" -#include "devicetopology.h" - -#ifdef __cplusplus -extern "C"{ -#endif - - -/* interface __MIDL_itf_endpointvolume_0000_0000 */ -/* [local] */ - -typedef struct AUDIO_VOLUME_NOTIFICATION_DATA - { - GUID guidEventContext; - BOOL bMuted; - float fMasterVolume; - UINT nChannels; - float afChannelVolumes[ 1 ]; - } AUDIO_VOLUME_NOTIFICATION_DATA; - -typedef struct AUDIO_VOLUME_NOTIFICATION_DATA *PAUDIO_VOLUME_NOTIFICATION_DATA; - -#define ENDPOINT_HARDWARE_SUPPORT_VOLUME 0x00000001 -#define ENDPOINT_HARDWARE_SUPPORT_MUTE 0x00000002 -#define ENDPOINT_HARDWARE_SUPPORT_METER 0x00000004 - - -extern RPC_IF_HANDLE __MIDL_itf_endpointvolume_0000_0000_v0_0_c_ifspec; -extern RPC_IF_HANDLE __MIDL_itf_endpointvolume_0000_0000_v0_0_s_ifspec; - -#ifndef __IAudioEndpointVolumeCallback_INTERFACE_DEFINED__ -#define __IAudioEndpointVolumeCallback_INTERFACE_DEFINED__ - -/* interface IAudioEndpointVolumeCallback */ -/* [unique][helpstring][nonextensible][uuid][local][object] */ - - -EXTERN_C const IID IID_IAudioEndpointVolumeCallback; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("657804FA-D6AD-4496-8A60-352752AF4F89") - IAudioEndpointVolumeCallback : public IUnknown - { - public: - virtual HRESULT STDMETHODCALLTYPE OnNotify( - PAUDIO_VOLUME_NOTIFICATION_DATA pNotify) = 0; - - }; - -#else /* C style interface */ - - typedef struct IAudioEndpointVolumeCallbackVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IAudioEndpointVolumeCallback * This, - /* [in] */ REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IAudioEndpointVolumeCallback * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IAudioEndpointVolumeCallback * This); - - HRESULT ( STDMETHODCALLTYPE *OnNotify )( - IAudioEndpointVolumeCallback * This, - PAUDIO_VOLUME_NOTIFICATION_DATA pNotify); - - END_INTERFACE - } IAudioEndpointVolumeCallbackVtbl; - - interface IAudioEndpointVolumeCallback - { - CONST_VTBL struct IAudioEndpointVolumeCallbackVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IAudioEndpointVolumeCallback_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IAudioEndpointVolumeCallback_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IAudioEndpointVolumeCallback_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IAudioEndpointVolumeCallback_OnNotify(This,pNotify) \ - ( (This)->lpVtbl -> OnNotify(This,pNotify) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IAudioEndpointVolumeCallback_INTERFACE_DEFINED__ */ - - -#ifndef __IAudioEndpointVolume_INTERFACE_DEFINED__ -#define __IAudioEndpointVolume_INTERFACE_DEFINED__ - -/* interface IAudioEndpointVolume */ -/* [unique][helpstring][nonextensible][uuid][local][object] */ - - -EXTERN_C const IID IID_IAudioEndpointVolume; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - 
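/* Usage sketch (C style, via the COBJMACROS helpers defined later in this
   header; error handling omitted, IMMDevice activation assumed from
   mmdeviceapi.h; GetMasterVolumeLevelScalar yields 0.0 .. 1.0):

       IAudioEndpointVolume *vol = NULL;
       IMMDevice_Activate(device, &IID_IAudioEndpointVolume,
                          CLSCTX_ALL, NULL, (void **)&vol);
       float scalar;
       IAudioEndpointVolume_GetMasterVolumeLevelScalar(vol, &scalar);
       IAudioEndpointVolume_Release(vol);
*/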
MIDL_INTERFACE("5CDF2C82-841E-4546-9722-0CF74078229A") - IAudioEndpointVolume : public IUnknown - { - public: - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE RegisterControlChangeNotify( - /* [in] */ - __in IAudioEndpointVolumeCallback *pNotify) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE UnregisterControlChangeNotify( - /* [in] */ - __in IAudioEndpointVolumeCallback *pNotify) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelCount( - /* [out] */ - __out UINT *pnChannelCount) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetMasterVolumeLevel( - /* [in] */ - __in float fLevelDB, - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetMasterVolumeLevelScalar( - /* [in] */ - __in float fLevel, - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMasterVolumeLevel( - /* [out] */ - __out float *pfLevelDB) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMasterVolumeLevelScalar( - /* [out] */ - __out float *pfLevel) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetChannelVolumeLevel( - /* [in] */ - __in UINT nChannel, - float fLevelDB, - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetChannelVolumeLevelScalar( - /* [in] */ - __in UINT nChannel, - float fLevel, - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelVolumeLevel( - /* [in] */ - __in UINT nChannel, - /* [out] */ - __out float *pfLevelDB) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelVolumeLevelScalar( - /* [in] */ - __in UINT nChannel, - /* [out] */ - __out float *pfLevel) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE SetMute( - /* [in] */ - __in BOOL bMute, - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMute( - /* [out] */ - __out BOOL *pbMute) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetVolumeStepInfo( - /* [out] */ - __out UINT *pnStep, - /* [out] */ - __out UINT *pnStepCount) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE VolumeStepUp( - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE VolumeStepDown( - /* [unique][in] */ LPCGUID pguidEventContext) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE QueryHardwareSupport( - /* [out] */ - __out DWORD *pdwHardwareSupportMask) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetVolumeRange( - /* [out] */ - __out float *pflVolumeMindB, - /* [out] */ - __out float *pflVolumeMaxdB, - /* [out] */ - __out float *pflVolumeIncrementdB) = 0; - - }; - -#else /* C style interface */ - - typedef struct IAudioEndpointVolumeVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IAudioEndpointVolume * This, - /* [in] */ REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IAudioEndpointVolume * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IAudioEndpointVolume * This); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *RegisterControlChangeNotify )( - IAudioEndpointVolume * This, - /* [in] */ - __in IAudioEndpointVolumeCallback *pNotify); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *UnregisterControlChangeNotify )( - IAudioEndpointVolume * 
This, - /* [in] */ - __in IAudioEndpointVolumeCallback *pNotify); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelCount )( - IAudioEndpointVolume * This, - /* [out] */ - __out UINT *pnChannelCount); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetMasterVolumeLevel )( - IAudioEndpointVolume * This, - /* [in] */ - __in float fLevelDB, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetMasterVolumeLevelScalar )( - IAudioEndpointVolume * This, - /* [in] */ - __in float fLevel, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMasterVolumeLevel )( - IAudioEndpointVolume * This, - /* [out] */ - __out float *pfLevelDB); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMasterVolumeLevelScalar )( - IAudioEndpointVolume * This, - /* [out] */ - __out float *pfLevel); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetChannelVolumeLevel )( - IAudioEndpointVolume * This, - /* [in] */ - __in UINT nChannel, - float fLevelDB, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetChannelVolumeLevelScalar )( - IAudioEndpointVolume * This, - /* [in] */ - __in UINT nChannel, - float fLevel, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelVolumeLevel )( - IAudioEndpointVolume * This, - /* [in] */ - __in UINT nChannel, - /* [out] */ - __out float *pfLevelDB); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelVolumeLevelScalar )( - IAudioEndpointVolume * This, - /* [in] */ - __in UINT nChannel, - /* [out] */ - __out float *pfLevel); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *SetMute )( - IAudioEndpointVolume * This, - /* [in] */ - __in BOOL bMute, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMute )( - IAudioEndpointVolume * This, - /* [out] */ - __out BOOL *pbMute); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetVolumeStepInfo )( - IAudioEndpointVolume * This, - /* [out] */ - __out UINT *pnStep, - /* [out] */ - __out UINT *pnStepCount); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *VolumeStepUp )( - IAudioEndpointVolume * This, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *VolumeStepDown )( - IAudioEndpointVolume * This, - /* [unique][in] */ LPCGUID pguidEventContext); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *QueryHardwareSupport )( - IAudioEndpointVolume * This, - /* [out] */ - __out DWORD *pdwHardwareSupportMask); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetVolumeRange )( - IAudioEndpointVolume * This, - /* [out] */ - __out float *pflVolumeMindB, - /* [out] */ - __out float *pflVolumeMaxdB, - /* [out] */ - __out float *pflVolumeIncrementdB); - - END_INTERFACE - } IAudioEndpointVolumeVtbl; - - interface IAudioEndpointVolume - { - CONST_VTBL struct IAudioEndpointVolumeVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IAudioEndpointVolume_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IAudioEndpointVolume_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IAudioEndpointVolume_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IAudioEndpointVolume_RegisterControlChangeNotify(This,pNotify) \ - ( (This)->lpVtbl -> RegisterControlChangeNotify(This,pNotify) ) - -#define 
IAudioEndpointVolume_UnregisterControlChangeNotify(This,pNotify) \ - ( (This)->lpVtbl -> UnregisterControlChangeNotify(This,pNotify) ) - -#define IAudioEndpointVolume_GetChannelCount(This,pnChannelCount) \ - ( (This)->lpVtbl -> GetChannelCount(This,pnChannelCount) ) - -#define IAudioEndpointVolume_SetMasterVolumeLevel(This,fLevelDB,pguidEventContext) \ - ( (This)->lpVtbl -> SetMasterVolumeLevel(This,fLevelDB,pguidEventContext) ) - -#define IAudioEndpointVolume_SetMasterVolumeLevelScalar(This,fLevel,pguidEventContext) \ - ( (This)->lpVtbl -> SetMasterVolumeLevelScalar(This,fLevel,pguidEventContext) ) - -#define IAudioEndpointVolume_GetMasterVolumeLevel(This,pfLevelDB) \ - ( (This)->lpVtbl -> GetMasterVolumeLevel(This,pfLevelDB) ) - -#define IAudioEndpointVolume_GetMasterVolumeLevelScalar(This,pfLevel) \ - ( (This)->lpVtbl -> GetMasterVolumeLevelScalar(This,pfLevel) ) - -#define IAudioEndpointVolume_SetChannelVolumeLevel(This,nChannel,fLevelDB,pguidEventContext) \ - ( (This)->lpVtbl -> SetChannelVolumeLevel(This,nChannel,fLevelDB,pguidEventContext) ) - -#define IAudioEndpointVolume_SetChannelVolumeLevelScalar(This,nChannel,fLevel,pguidEventContext) \ - ( (This)->lpVtbl -> SetChannelVolumeLevelScalar(This,nChannel,fLevel,pguidEventContext) ) - -#define IAudioEndpointVolume_GetChannelVolumeLevel(This,nChannel,pfLevelDB) \ - ( (This)->lpVtbl -> GetChannelVolumeLevel(This,nChannel,pfLevelDB) ) - -#define IAudioEndpointVolume_GetChannelVolumeLevelScalar(This,nChannel,pfLevel) \ - ( (This)->lpVtbl -> GetChannelVolumeLevelScalar(This,nChannel,pfLevel) ) - -#define IAudioEndpointVolume_SetMute(This,bMute,pguidEventContext) \ - ( (This)->lpVtbl -> SetMute(This,bMute,pguidEventContext) ) - -#define IAudioEndpointVolume_GetMute(This,pbMute) \ - ( (This)->lpVtbl -> GetMute(This,pbMute) ) - -#define IAudioEndpointVolume_GetVolumeStepInfo(This,pnStep,pnStepCount) \ - ( (This)->lpVtbl -> GetVolumeStepInfo(This,pnStep,pnStepCount) ) - -#define IAudioEndpointVolume_VolumeStepUp(This,pguidEventContext) \ - ( (This)->lpVtbl -> VolumeStepUp(This,pguidEventContext) ) - -#define IAudioEndpointVolume_VolumeStepDown(This,pguidEventContext) \ - ( (This)->lpVtbl -> VolumeStepDown(This,pguidEventContext) ) - -#define IAudioEndpointVolume_QueryHardwareSupport(This,pdwHardwareSupportMask) \ - ( (This)->lpVtbl -> QueryHardwareSupport(This,pdwHardwareSupportMask) ) - -#define IAudioEndpointVolume_GetVolumeRange(This,pflVolumeMindB,pflVolumeMaxdB,pflVolumeIncrementdB) \ - ( (This)->lpVtbl -> GetVolumeRange(This,pflVolumeMindB,pflVolumeMaxdB,pflVolumeIncrementdB) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IAudioEndpointVolume_INTERFACE_DEFINED__ */ - - -#ifndef __IAudioMeterInformation_INTERFACE_DEFINED__ -#define __IAudioMeterInformation_INTERFACE_DEFINED__ - -/* interface IAudioMeterInformation */ -/* [unique][helpstring][nonextensible][uuid][local][object] */ - - -EXTERN_C const IID IID_IAudioMeterInformation; - -#if defined(__cplusplus) && !defined(CINTERFACE) - - MIDL_INTERFACE("C02216F6-8C67-4B5B-9D00-D008E73E0064") - IAudioMeterInformation : public IUnknown - { - public: - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetPeakValue( - /* [out] */ float *pfPeak) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetMeteringChannelCount( - /* [out] */ - __out UINT *pnChannelCount) = 0; - - virtual /* [helpstring] */ HRESULT STDMETHODCALLTYPE GetChannelsPeakValues( - /* [in] */ UINT32 u32ChannelCount, - /* [size_is][out] */ float *afPeakValues) = 0; - - virtual 
/* [helpstring] */ HRESULT STDMETHODCALLTYPE QueryHardwareSupport( - /* [out] */ - __out DWORD *pdwHardwareSupportMask) = 0; - - }; - -#else /* C style interface */ - - typedef struct IAudioMeterInformationVtbl - { - BEGIN_INTERFACE - - HRESULT ( STDMETHODCALLTYPE *QueryInterface )( - IAudioMeterInformation * This, - /* [in] */ REFIID riid, - /* [iid_is][out] */ - __RPC__deref_out void **ppvObject); - - ULONG ( STDMETHODCALLTYPE *AddRef )( - IAudioMeterInformation * This); - - ULONG ( STDMETHODCALLTYPE *Release )( - IAudioMeterInformation * This); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetPeakValue )( - IAudioMeterInformation * This, - /* [out] */ float *pfPeak); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetMeteringChannelCount )( - IAudioMeterInformation * This, - /* [out] */ - __out UINT *pnChannelCount); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *GetChannelsPeakValues )( - IAudioMeterInformation * This, - /* [in] */ UINT32 u32ChannelCount, - /* [size_is][out] */ float *afPeakValues); - - /* [helpstring] */ HRESULT ( STDMETHODCALLTYPE *QueryHardwareSupport )( - IAudioMeterInformation * This, - /* [out] */ - __out DWORD *pdwHardwareSupportMask); - - END_INTERFACE - } IAudioMeterInformationVtbl; - - interface IAudioMeterInformation - { - CONST_VTBL struct IAudioMeterInformationVtbl *lpVtbl; - }; - - - -#ifdef COBJMACROS - - -#define IAudioMeterInformation_QueryInterface(This,riid,ppvObject) \ - ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) - -#define IAudioMeterInformation_AddRef(This) \ - ( (This)->lpVtbl -> AddRef(This) ) - -#define IAudioMeterInformation_Release(This) \ - ( (This)->lpVtbl -> Release(This) ) - - -#define IAudioMeterInformation_GetPeakValue(This,pfPeak) \ - ( (This)->lpVtbl -> GetPeakValue(This,pfPeak) ) - -#define IAudioMeterInformation_GetMeteringChannelCount(This,pnChannelCount) \ - ( (This)->lpVtbl -> GetMeteringChannelCount(This,pnChannelCount) ) - -#define IAudioMeterInformation_GetChannelsPeakValues(This,u32ChannelCount,afPeakValues) \ - ( (This)->lpVtbl -> GetChannelsPeakValues(This,u32ChannelCount,afPeakValues) ) - -#define IAudioMeterInformation_QueryHardwareSupport(This,pdwHardwareSupportMask) \ - ( (This)->lpVtbl -> QueryHardwareSupport(This,pdwHardwareSupportMask) ) - -#endif /* COBJMACROS */ - - -#endif /* C style interface */ - - - - -#endif /* __IAudioMeterInformation_INTERFACE_DEFINED__ */ - - -/* Additional Prototypes for ALL interfaces */ - -/* end of Additional Prototypes */ - -#ifdef __cplusplus -} -#endif - -#endif - - - diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/utils.py deleted file mode 100644 index e5cbbf4c0ddfa5c1b5898d8a4405e27292100d41..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/charset_normalizer/utils.py +++ /dev/null @@ -1,421 +0,0 @@ -import importlib -import logging -import unicodedata -from codecs import IncrementalDecoder -from encodings.aliases import aliases -from functools import lru_cache -from re import findall -from typing import Generator, List, Optional, Set, Tuple, Union - -from _multibytecodec import MultibyteIncrementalDecoder - -from .constant import ( - ENCODING_MARKS, - IANA_SUPPORTED_SIMILAR, - RE_POSSIBLE_ENCODING_INDICATION, - UNICODE_RANGES_COMBINED, - UNICODE_SECONDARY_RANGE_KEYWORD, - UTF8_MAXIMAL_ALLOCATION, -) - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def 
is_accentuated(character: str) -> bool: - try: - description: str = unicodedata.name(character) - except ValueError: - return False - return ( - "WITH GRAVE" in description - or "WITH ACUTE" in description - or "WITH CEDILLA" in description - or "WITH DIAERESIS" in description - or "WITH CIRCUMFLEX" in description - or "WITH TILDE" in description - or "WITH MACRON" in description - or "WITH RING ABOVE" in description - ) - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def remove_accent(character: str) -> str: - decomposed: str = unicodedata.decomposition(character) - if not decomposed: - return character - - codes: List[str] = decomposed.split(" ") - - return chr(int(codes[0], 16)) - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def unicode_range(character: str) -> Optional[str]: - """ - Retrieve the Unicode range official name from a single character. - """ - character_ord: int = ord(character) - - for range_name, ord_range in UNICODE_RANGES_COMBINED.items(): - if character_ord in ord_range: - return range_name - - return None - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_latin(character: str) -> bool: - try: - description: str = unicodedata.name(character) - except ValueError: - return False - return "LATIN" in description - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_punctuation(character: str) -> bool: - character_category: str = unicodedata.category(character) - - if "P" in character_category: - return True - - character_range: Optional[str] = unicode_range(character) - - if character_range is None: - return False - - return "Punctuation" in character_range - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_symbol(character: str) -> bool: - character_category: str = unicodedata.category(character) - - if "S" in character_category or "N" in character_category: - return True - - character_range: Optional[str] = unicode_range(character) - - if character_range is None: - return False - - return "Forms" in character_range and character_category != "Lo" - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_emoticon(character: str) -> bool: - character_range: Optional[str] = unicode_range(character) - - if character_range is None: - return False - - return "Emoticons" in character_range or "Pictographs" in character_range - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_separator(character: str) -> bool: - if character.isspace() or character in {"|", "+", "<", ">"}: - return True - - character_category: str = unicodedata.category(character) - - return "Z" in character_category or character_category in {"Po", "Pd", "Pc"} - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_case_variable(character: str) -> bool: - return character.islower() != character.isupper() - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_cjk(character: str) -> bool: - try: - character_name = unicodedata.name(character) - except ValueError: - return False - - return "CJK" in character_name - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_hiragana(character: str) -> bool: - try: - character_name = unicodedata.name(character) - except ValueError: - return False - - return "HIRAGANA" in character_name - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_katakana(character: str) -> bool: - try: - character_name = unicodedata.name(character) - except ValueError: - return False - - return "KATAKANA" in character_name - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_hangul(character: str) -> bool: - try: - character_name = unicodedata.name(character) - 
except ValueError: - return False - - return "HANGUL" in character_name - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_thai(character: str) -> bool: - try: - character_name = unicodedata.name(character) - except ValueError: - return False - - return "THAI" in character_name - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_arabic(character: str) -> bool: - try: - character_name = unicodedata.name(character) - except ValueError: - return False - - return "ARABIC" in character_name - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_arabic_isolated_form(character: str) -> bool: - try: - character_name = unicodedata.name(character) - except ValueError: - return False - - return "ARABIC" in character_name and "ISOLATED FORM" in character_name - - -@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED)) -def is_unicode_range_secondary(range_name: str) -> bool: - return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD) - - -@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) -def is_unprintable(character: str) -> bool: - return ( - character.isspace() is False # includes \n \t \r \v - and character.isprintable() is False - and character != "\x1A" # Why? Its the ASCII substitute character. - and character != "\ufeff" # bug discovered in Python, - # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space. - ) - - -def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]: - """ - Extract using ASCII-only decoder any specified encoding in the first n-bytes. - """ - if not isinstance(sequence, bytes): - raise TypeError - - seq_len: int = len(sequence) - - results: List[str] = findall( - RE_POSSIBLE_ENCODING_INDICATION, - sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"), - ) - - if len(results) == 0: - return None - - for specified_encoding in results: - specified_encoding = specified_encoding.lower().replace("-", "_") - - encoding_alias: str - encoding_iana: str - - for encoding_alias, encoding_iana in aliases.items(): - if encoding_alias == specified_encoding: - return encoding_iana - if encoding_iana == specified_encoding: - return encoding_iana - - return None - - -@lru_cache(maxsize=128) -def is_multi_byte_encoding(name: str) -> bool: - """ - Verify is a specific encoding is a multi byte one based on it IANA name - """ - return name in { - "utf_8", - "utf_8_sig", - "utf_16", - "utf_16_be", - "utf_16_le", - "utf_32", - "utf_32_le", - "utf_32_be", - "utf_7", - } or issubclass( - importlib.import_module("encodings.{}".format(name)).IncrementalDecoder, - MultibyteIncrementalDecoder, - ) - - -def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]: - """ - Identify and extract SIG/BOM in given sequence. 
- """ - - for iana_encoding in ENCODING_MARKS: - marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding] - - if isinstance(marks, bytes): - marks = [marks] - - for mark in marks: - if sequence.startswith(mark): - return iana_encoding, mark - - return None, b"" - - -def should_strip_sig_or_bom(iana_encoding: str) -> bool: - return iana_encoding not in {"utf_16", "utf_32"} - - -def iana_name(cp_name: str, strict: bool = True) -> str: - cp_name = cp_name.lower().replace("-", "_") - - encoding_alias: str - encoding_iana: str - - for encoding_alias, encoding_iana in aliases.items(): - if cp_name in [encoding_alias, encoding_iana]: - return encoding_iana - - if strict: - raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name)) - - return cp_name - - -def range_scan(decoded_sequence: str) -> List[str]: - ranges: Set[str] = set() - - for character in decoded_sequence: - character_range: Optional[str] = unicode_range(character) - - if character_range is None: - continue - - ranges.add(character_range) - - return list(ranges) - - -def cp_similarity(iana_name_a: str, iana_name_b: str) -> float: - if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b): - return 0.0 - - decoder_a = importlib.import_module( - "encodings.{}".format(iana_name_a) - ).IncrementalDecoder - decoder_b = importlib.import_module( - "encodings.{}".format(iana_name_b) - ).IncrementalDecoder - - id_a: IncrementalDecoder = decoder_a(errors="ignore") - id_b: IncrementalDecoder = decoder_b(errors="ignore") - - character_match_count: int = 0 - - for i in range(255): - to_be_decoded: bytes = bytes([i]) - if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded): - character_match_count += 1 - - return character_match_count / 254 - - -def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool: - """ - Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using - the function cp_similarity. - """ - return ( - iana_name_a in IANA_SUPPORTED_SIMILAR - and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a] - ) - - -def set_logging_handler( - name: str = "charset_normalizer", - level: int = logging.INFO, - format_string: str = "%(asctime)s | %(levelname)s | %(message)s", -) -> None: - logger = logging.getLogger(name) - logger.setLevel(level) - - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter(format_string)) - logger.addHandler(handler) - - -def cut_sequence_chunks( - sequences: bytes, - encoding_iana: str, - offsets: range, - chunk_size: int, - bom_or_sig_available: bool, - strip_sig_or_bom: bool, - sig_payload: bytes, - is_multi_byte_decoder: bool, - decoded_payload: Optional[str] = None, -) -> Generator[str, None, None]: - if decoded_payload and is_multi_byte_decoder is False: - for i in offsets: - chunk = decoded_payload[i : i + chunk_size] - if not chunk: - break - yield chunk - else: - for i in offsets: - chunk_end = i + chunk_size - if chunk_end > len(sequences) + 8: - continue - - cut_sequence = sequences[i : i + chunk_size] - - if bom_or_sig_available and strip_sig_or_bom is False: - cut_sequence = sig_payload + cut_sequence - - chunk = cut_sequence.decode( - encoding_iana, - errors="ignore" if is_multi_byte_decoder else "strict", - ) - - # multi-byte bad cutting detector and adjustment - # not the cleanest way to perform that fix but clever enough for now. 
- if is_multi_byte_decoder and i > 0: - chunk_partial_size_chk: int = min(chunk_size, 16) - - if ( - decoded_payload - and chunk[:chunk_partial_size_chk] not in decoded_payload - ): - for j in range(i, i - 4, -1): - cut_sequence = sequences[j:chunk_end] - - if bom_or_sig_available and strip_sig_or_bom is False: - cut_sequence = sig_payload + cut_sequence - - chunk = cut_sequence.decode(encoding_iana, errors="ignore") - - if chunk[:chunk_partial_size_chk] in decoded_payload: - break - - yield chunk diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/matrixlib/setup.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/matrixlib/setup.py deleted file mode 100644 index 4fed75de1cbc22357c675fd8ce2d52cbb6829b50..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/matrixlib/setup.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 -def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('matrixlib', parent_package, top_path) - config.add_subpackage('tests') - config.add_data_files('*.pyi') - return config - -if __name__ == "__main__": - from numpy.distutils.core import setup - config = configuration(top_path='').todict() - setup(**config) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py deleted file mode 100644 index d0ce357aef2765ad72cd3a5f4d0ed48fc07463c1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py +++ /dev/null @@ -1,231 +0,0 @@ -import numpy as np -from numpy.testing import assert_warns -from numpy.ma.testutils import (assert_, assert_equal, assert_raises, - assert_array_equal) -from numpy.ma.core import (masked_array, masked_values, masked, allequal, - MaskType, getmask, MaskedArray, nomask, - log, add, hypot, divide) -from numpy.ma.extras import mr_ -from numpy.compat import pickle - - -class MMatrix(MaskedArray, np.matrix,): - - def __new__(cls, data, mask=nomask): - mat = np.matrix(data) - _data = MaskedArray.__new__(cls, data=mat, mask=mask) - return _data - - def __array_finalize__(self, obj): - np.matrix.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self, obj) - return - - @property - def _series(self): - _view = self.view(MaskedArray) - _view._sharedmask = False - return _view - - -class TestMaskedMatrix: - def test_matrix_indexing(self): - # Tests conversions and indexing - x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) - x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) - x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) - x4 = masked_array(x1) - # test conversion to strings - str(x2) # raises? - repr(x2) # raises? 
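        # (For reference: x2 masks x1 at [0, 0] and [1, 1], x3 at [0, 1] and
        #  [1, 0], and x4 has no mask, so the indexing checks below exercise
        #  masked, unmasked and mixed entries against the plain np.matrix x1.)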
- # tests of indexing - assert_(type(x2[1, 0]) is type(x1[1, 0])) - assert_(x1[1, 0] == x2[1, 0]) - assert_(x2[1, 1] is masked) - assert_equal(x1[0, 2], x2[0, 2]) - assert_equal(x1[0, 1:], x2[0, 1:]) - assert_equal(x1[:, 2], x2[:, 2]) - assert_equal(x1[:], x2[:]) - assert_equal(x1[1:], x3[1:]) - x1[0, 2] = 9 - x2[0, 2] = 9 - assert_equal(x1, x2) - x1[0, 1:] = 99 - x2[0, 1:] = 99 - assert_equal(x1, x2) - x2[0, 1] = masked - assert_equal(x1, x2) - x2[0, 1:] = masked - assert_equal(x1, x2) - x2[0, :] = x1[0, :] - x2[0, 1] = masked - assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) - x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) - assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) - assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) - x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) - assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) - assert_(allequal(x4[1], masked_array([1, 2, 3]))) - x1 = np.matrix(np.arange(5) * 1.0) - x2 = masked_values(x1, 3.0) - assert_equal(x1, x2) - assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), - x2.mask)) - assert_equal(3.0, x2.fill_value) - - def test_pickling_subbaseclass(self): - # Test pickling w/ a subclass of ndarray - a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) - for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): - a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) - assert_equal(a_pickled._mask, a._mask) - assert_equal(a_pickled, a) - assert_(isinstance(a_pickled._data, np.matrix)) - - def test_count_mean_with_matrix(self): - m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) - - assert_equal(m.count(axis=0).shape, (1, 2)) - assert_equal(m.count(axis=1).shape, (2, 1)) - - # Make sure broadcasting inside mean and var work - assert_equal(m.mean(axis=0), [[2., 3.]]) - assert_equal(m.mean(axis=1), [[1.5], [3.5]]) - - def test_flat(self): - # Test that flat can return items even for matrices [#4585, #4615] - # test simple access - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - assert_equal(test.flat[1], 2) - assert_equal(test.flat[2], masked) - assert_(np.all(test.flat[0:2] == test[0, 0:2])) - # Test flat on masked_matrices - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) - control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) - assert_equal(test, control) - # Test setting - test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - testflat = test.flat - testflat[:] = testflat[[2, 1, 0]] - assert_equal(test, control) - testflat[0] = 9 - # test that matrices keep the correct shape (#4615) - a = masked_array(np.matrix(np.eye(2)), mask=0) - b = a.flat - b01 = b[:2] - assert_equal(b01.data, np.array([[1., 0.]])) - assert_equal(b01.mask, np.array([[False, False]])) - - def test_allany_onmatrices(self): - x = np.array([[0.13, 0.26, 0.90], - [0.28, 0.33, 0.63], - [0.31, 0.87, 0.70]]) - X = np.matrix(x) - m = np.array([[True, False, False], - [False, False, False], - [True, True, False]], dtype=np.bool_) - mX = masked_array(X, mask=m) - mXbig = (mX > 0.5) - mXsmall = (mX < 0.5) - - assert_(not mXbig.all()) - assert_(mXbig.any()) - assert_equal(mXbig.all(0), np.matrix([False, False, True])) - assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) - assert_equal(mXbig.any(0), np.matrix([False, False, True])) - assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) - - assert_(not mXsmall.all()) - assert_(mXsmall.any()) - assert_equal(mXsmall.all(0), 
np.matrix([True, True, False])) - assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) - assert_equal(mXsmall.any(0), np.matrix([True, True, False])) - assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) - - def test_compressed(self): - a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) - b = a.compressed() - assert_equal(b, a) - assert_(isinstance(b, np.matrix)) - a[0, 0] = masked - b = a.compressed() - assert_equal(b, [[2, 3, 4]]) - - def test_ravel(self): - a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) - aravel = a.ravel() - assert_equal(aravel.shape, (1, 5)) - assert_equal(aravel._mask.shape, a.shape) - - def test_view(self): - # Test view w/ flexible dtype - iterator = list(zip(np.arange(10), np.random.rand(10))) - data = np.array(iterator) - a = masked_array(iterator, dtype=[('a', float), ('b', float)]) - a.mask[0] = (1, 0) - test = a.view((float, 2), np.matrix) - assert_equal(test, data) - assert_(isinstance(test, np.matrix)) - assert_(not isinstance(test, MaskedArray)) - - -class TestSubclassing: - # Test suite for masked subclasses of ndarray. - - def setup_method(self): - x = np.arange(5, dtype='float') - mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) - self.data = (x, mx) - - def test_maskedarray_subclassing(self): - # Tests subclassing MaskedArray - (x, mx) = self.data - assert_(isinstance(mx._data, np.matrix)) - - def test_masked_unary_operations(self): - # Tests masked_unary_operation - (x, mx) = self.data - with np.errstate(divide='ignore'): - assert_(isinstance(log(mx), MMatrix)) - assert_equal(log(x), np.log(x)) - - def test_masked_binary_operations(self): - # Tests masked_binary_operation - (x, mx) = self.data - # Result should be a MMatrix - assert_(isinstance(add(mx, mx), MMatrix)) - assert_(isinstance(add(mx, x), MMatrix)) - # Result should work - assert_equal(add(mx, x), mx+x) - assert_(isinstance(add(mx, mx)._data, np.matrix)) - with assert_warns(DeprecationWarning): - assert_(isinstance(add.outer(mx, mx), MMatrix)) - assert_(isinstance(hypot(mx, mx), MMatrix)) - assert_(isinstance(hypot(mx, x), MMatrix)) - - def test_masked_binary_operations2(self): - # Tests domained_masked_binary_operation - (x, mx) = self.data - xmx = masked_array(mx.data.__array__(), mask=mx.mask) - assert_(isinstance(divide(mx, mx), MMatrix)) - assert_(isinstance(divide(mx, x), MMatrix)) - assert_equal(divide(mx, mx), divide(xmx, xmx)) - -class TestConcatenator: - # Tests for mr_, the equivalent of r_ for masked arrays. - - def test_matrix_builder(self): - assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) - - def test_matrix(self): - # Test consistency with unmasked version. If we ever deprecate - # matrix, this test should either still pass, or both actual and - # expected should fail to be build. 
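        # (mr_['r', 1, 2, 3] mirrors np.r_['r', 1, 2, 3], building a 1x3 row
        #  matrix; the checks below assert that both the outer masked type and
        #  the inner .data type survive the concatenation unchanged.)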
- actual = mr_['r', 1, 2, 3] - expected = np.ma.array(np.r_['r', 1, 2, 3]) - assert_array_equal(actual, expected) - - # outer type is masked array, inner type is matrix - assert_equal(type(actual), type(expected)) - assert_equal(type(actual.data), type(expected.data)) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.py deleted file mode 100644 index 2b3f3d3d16ac6c49d231ac526fa89570975e4bfb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/boolean/test_function.py +++ /dev/null @@ -1,126 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -import pandas._testing as tm - - -@pytest.mark.parametrize( - "ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor] -) -def test_ufuncs_binary(ufunc): - # two BooleanArrays - a = pd.array([True, False, None], dtype="boolean") - result = ufunc(a, a) - expected = pd.array(ufunc(a._data, a._data), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_extension_array_equal(result, expected) - - s = pd.Series(a) - result = ufunc(s, a) - expected = pd.Series(ufunc(a._data, a._data), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_series_equal(result, expected) - - # Boolean with numpy array - arr = np.array([True, True, False]) - result = ufunc(a, arr) - expected = pd.array(ufunc(a._data, arr), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_extension_array_equal(result, expected) - - result = ufunc(arr, a) - expected = pd.array(ufunc(arr, a._data), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_extension_array_equal(result, expected) - - # BooleanArray with scalar - result = ufunc(a, True) - expected = pd.array(ufunc(a._data, True), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_extension_array_equal(result, expected) - - result = ufunc(True, a) - expected = pd.array(ufunc(True, a._data), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_extension_array_equal(result, expected) - - # not handled types - msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__" - with pytest.raises(TypeError, match=msg): - ufunc(a, "test") - - -@pytest.mark.parametrize("ufunc", [np.logical_not]) -def test_ufuncs_unary(ufunc): - a = pd.array([True, False, None], dtype="boolean") - result = ufunc(a) - expected = pd.array(ufunc(a._data), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_extension_array_equal(result, expected) - - ser = pd.Series(a) - result = ufunc(ser) - expected = pd.Series(ufunc(a._data), dtype="boolean") - expected[a._mask] = np.nan - tm.assert_series_equal(result, expected) - - -def test_ufunc_numeric(): - # np.sqrt on np.bool_ returns float16, which we upcast to Float32 - # bc we do not have Float16 - arr = pd.array([True, False, None], dtype="boolean") - - res = np.sqrt(arr) - - expected = pd.array([1, 0, None], dtype="Float32") - tm.assert_extension_array_equal(res, expected) - - -@pytest.mark.parametrize("values", [[True, False], [True, None]]) -def test_ufunc_reduce_raises(values): - arr = pd.array(values, dtype="boolean") - - res = np.add.reduce(arr) - if arr[-1] is pd.NA: - expected = pd.NA - else: - expected = arr._data.sum() - tm.assert_almost_equal(res, expected) - - -def test_value_counts_na(): - arr = pd.array([True, False, pd.NA], dtype="boolean") - result = arr.value_counts(dropna=False) 
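        # dropna=False keeps pd.NA as its own entry, so each of True, False
        # and <NA> is counted once and the counts come back as Int64.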
- expected = pd.Series([1, 1, 1], index=arr, dtype="Int64", name="count") - assert expected.index.dtype == arr.dtype - tm.assert_series_equal(result, expected) - - result = arr.value_counts(dropna=True) - expected = pd.Series([1, 1], index=arr[:-1], dtype="Int64", name="count") - assert expected.index.dtype == arr.dtype - tm.assert_series_equal(result, expected) - - -def test_value_counts_with_normalize(): - ser = pd.Series([True, False, pd.NA], dtype="boolean") - result = ser.value_counts(normalize=True) - expected = pd.Series([1, 1], index=ser[:-1], dtype="Float64", name="proportion") / 2 - assert expected.index.dtype == "boolean" - tm.assert_series_equal(result, expected) - - -def test_diff(): - a = pd.array( - [True, True, False, False, True, None, True, None, False], dtype="boolean" - ) - result = pd.core.algorithms.diff(a, 1) - expected = pd.array( - [None, False, True, False, True, None, None, None, None], dtype="boolean" - ) - tm.assert_extension_array_equal(result, expected) - - ser = pd.Series(a) - result = ser.diff() - expected = pd.Series(expected) - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.py deleted file mode 100644 index d7a2140f817f3a8e5689d001768cf5642118b105..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/interval/test_astype.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest - -from pandas import ( - Categorical, - CategoricalDtype, - Index, - IntervalIndex, -) -import pandas._testing as tm - - -class TestAstype: - @pytest.mark.parametrize("ordered", [True, False]) - def test_astype_categorical_retains_ordered(self, ordered): - index = IntervalIndex.from_breaks(range(5)) - arr = index._data - - dtype = CategoricalDtype(None, ordered=ordered) - - expected = Categorical(list(arr), ordered=ordered) - result = arr.astype(dtype) - assert result.ordered is ordered - tm.assert_categorical_equal(result, expected) - - # test IntervalIndex.astype while we're at it. 
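        # (Index.astype should take the same path as the underlying
        #  IntervalArray.astype, so the categories keep their `ordered`
        #  flag here as well.)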
- result = index.astype(dtype) - expected = Index(expected) - tm.assert_index_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/json/test_normalize.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/json/test_normalize.py deleted file mode 100644 index 316f2628854245cbab6bd5e646d2adceef9e820a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/json/test_normalize.py +++ /dev/null @@ -1,907 +0,0 @@ -import json - -import numpy as np -import pytest - -from pandas import ( - DataFrame, - Index, - Series, - json_normalize, -) -import pandas._testing as tm - -from pandas.io.json._normalize import nested_to_record - - -@pytest.fixture -def deep_nested(): - # deeply nested data - return [ - { - "country": "USA", - "states": [ - { - "name": "California", - "cities": [ - {"name": "San Francisco", "pop": 12345}, - {"name": "Los Angeles", "pop": 12346}, - ], - }, - { - "name": "Ohio", - "cities": [ - {"name": "Columbus", "pop": 1234}, - {"name": "Cleveland", "pop": 1236}, - ], - }, - ], - }, - { - "country": "Germany", - "states": [ - {"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]}, - { - "name": "Nordrhein-Westfalen", - "cities": [ - {"name": "Duesseldorf", "pop": 1238}, - {"name": "Koeln", "pop": 1239}, - ], - }, - ], - }, - ] - - -@pytest.fixture -def state_data(): - return [ - { - "counties": [ - {"name": "Dade", "population": 12345}, - {"name": "Broward", "population": 40000}, - {"name": "Palm Beach", "population": 60000}, - ], - "info": {"governor": "Rick Scott"}, - "shortname": "FL", - "state": "Florida", - }, - { - "counties": [ - {"name": "Summit", "population": 1234}, - {"name": "Cuyahoga", "population": 1337}, - ], - "info": {"governor": "John Kasich"}, - "shortname": "OH", - "state": "Ohio", - }, - ] - - -@pytest.fixture -def author_missing_data(): - return [ - {"info": None}, - { - "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"}, - "author_name": {"first": "Jane", "last_name": "Doe"}, - }, - ] - - -@pytest.fixture -def missing_metadata(): - return [ - { - "name": "Alice", - "addresses": [ - { - "number": 9562, - "street": "Morris St.", - "city": "Massillon", - "state": "OH", - "zip": 44646, - } - ], - "previous_residences": {"cities": [{"city_name": "Foo York City"}]}, - }, - { - "addresses": [ - { - "number": 8449, - "street": "Spring St.", - "city": "Elizabethton", - "state": "TN", - "zip": 37643, - } - ], - "previous_residences": {"cities": [{"city_name": "Barmingham"}]}, - }, - ] - - -@pytest.fixture -def max_level_test_input_data(): - """ - input data to test json_normalize with max_level param - """ - return [ - { - "CreatedBy": {"Name": "User001"}, - "Lookup": { - "TextField": "Some text", - "UserField": {"Id": "ID001", "Name": "Name001"}, - }, - "Image": {"a": "b"}, - } - ] - - -class TestJSONNormalize: - def test_simple_records(self): - recs = [ - {"a": 1, "b": 2, "c": 3}, - {"a": 4, "b": 5, "c": 6}, - {"a": 7, "b": 8, "c": 9}, - {"a": 10, "b": 11, "c": 12}, - ] - - result = json_normalize(recs) - expected = DataFrame(recs) - - tm.assert_frame_equal(result, expected) - - def test_simple_normalize(self, state_data): - result = json_normalize(state_data[0], "counties") - expected = DataFrame(state_data[0]["counties"]) - tm.assert_frame_equal(result, expected) - - result = json_normalize(state_data, "counties") - - expected = [] - for rec in state_data: - 
expected.extend(rec["counties"]) - expected = DataFrame(expected) - - tm.assert_frame_equal(result, expected) - - result = json_normalize(state_data, "counties", meta="state") - expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2]) - - tm.assert_frame_equal(result, expected) - - def test_fields_list_type_normalize(self): - parse_metadata_fields_list_type = [ - {"values": [1, 2, 3], "metadata": {"listdata": [1, 2]}} - ] - result = json_normalize( - parse_metadata_fields_list_type, - record_path=["values"], - meta=[["metadata", "listdata"]], - ) - expected = DataFrame( - {0: [1, 2, 3], "metadata.listdata": [[1, 2], [1, 2], [1, 2]]} - ) - tm.assert_frame_equal(result, expected) - - def test_empty_array(self): - result = json_normalize([]) - expected = DataFrame() - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "data, record_path, exception_type", - [ - ([{"a": 0}, {"a": 1}], None, None), - ({"a": [{"a": 0}, {"a": 1}]}, "a", None), - ('{"a": [{"a": 0}, {"a": 1}]}', None, NotImplementedError), - (None, None, NotImplementedError), - ], - ) - def test_accepted_input(self, data, record_path, exception_type): - if exception_type is not None: - with pytest.raises(exception_type, match=tm.EMPTY_STRING_PATTERN): - json_normalize(data, record_path=record_path) - else: - result = json_normalize(data, record_path=record_path) - expected = DataFrame([0, 1], columns=["a"]) - tm.assert_frame_equal(result, expected) - - def test_simple_normalize_with_separator(self, deep_nested): - # GH 14883 - result = json_normalize({"A": {"A": 1, "B": 2}}) - expected = DataFrame([[1, 2]], columns=["A.A", "A.B"]) - tm.assert_frame_equal(result.reindex_like(expected), expected) - - result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_") - expected = DataFrame([[1, 2]], columns=["A_A", "A_B"]) - tm.assert_frame_equal(result.reindex_like(expected), expected) - - result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3") - expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"]) - tm.assert_frame_equal(result.reindex_like(expected), expected) - - result = json_normalize( - deep_nested, - ["states", "cities"], - meta=["country", ["states", "name"]], - sep="_", - ) - expected = Index(["name", "pop", "country", "states_name"]).sort_values() - assert result.columns.sort_values().equals(expected) - - def test_normalize_with_multichar_separator(self): - # GH #43831 - data = {"a": [1, 2], "b": {"b_1": 2, "b_2": (3, 4)}} - result = json_normalize(data, sep="__") - expected = DataFrame([[[1, 2], 2, (3, 4)]], columns=["a", "b__b_1", "b__b_2"]) - tm.assert_frame_equal(result, expected) - - def test_value_array_record_prefix(self): - # GH 21536 - result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.") - expected = DataFrame([[1], [2]], columns=["Prefix.0"]) - tm.assert_frame_equal(result, expected) - - def test_nested_object_record_path(self): - # GH 22706 - data = { - "state": "Florida", - "info": { - "governor": "Rick Scott", - "counties": [ - {"name": "Dade", "population": 12345}, - {"name": "Broward", "population": 40000}, - {"name": "Palm Beach", "population": 60000}, - ], - }, - } - result = json_normalize(data, record_path=["info", "counties"]) - expected = DataFrame( - [["Dade", 12345], ["Broward", 40000], ["Palm Beach", 60000]], - columns=["name", "population"], - ) - tm.assert_frame_equal(result, expected) - - def test_more_deeply_nested(self, deep_nested): - result = json_normalize( - deep_nested, ["states", "cities"], meta=["country", ["states", "name"]] - ) - 
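        # record_path=["states", "cities"] emits one row per city (7 rows in
        # total); each meta entry ("country" and the nested ["states", "name"])
        # is broadcast down to the rows of its cities, as ex_data spells out.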
ex_data = { - "country": ["USA"] * 4 + ["Germany"] * 3, - "states.name": [ - "California", - "California", - "Ohio", - "Ohio", - "Bayern", - "Nordrhein-Westfalen", - "Nordrhein-Westfalen", - ], - "name": [ - "San Francisco", - "Los Angeles", - "Columbus", - "Cleveland", - "Munich", - "Duesseldorf", - "Koeln", - ], - "pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239], - } - - expected = DataFrame(ex_data, columns=result.columns) - tm.assert_frame_equal(result, expected) - - def test_shallow_nested(self): - data = [ - { - "state": "Florida", - "shortname": "FL", - "info": {"governor": "Rick Scott"}, - "counties": [ - {"name": "Dade", "population": 12345}, - {"name": "Broward", "population": 40000}, - {"name": "Palm Beach", "population": 60000}, - ], - }, - { - "state": "Ohio", - "shortname": "OH", - "info": {"governor": "John Kasich"}, - "counties": [ - {"name": "Summit", "population": 1234}, - {"name": "Cuyahoga", "population": 1337}, - ], - }, - ] - - result = json_normalize( - data, "counties", ["state", "shortname", ["info", "governor"]] - ) - ex_data = { - "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"], - "state": ["Florida"] * 3 + ["Ohio"] * 2, - "shortname": ["FL", "FL", "FL", "OH", "OH"], - "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2, - "population": [12345, 40000, 60000, 1234, 1337], - } - expected = DataFrame(ex_data, columns=result.columns) - tm.assert_frame_equal(result, expected) - - def test_nested_meta_path_with_nested_record_path(self, state_data): - # GH 27220 - result = json_normalize( - data=state_data, - record_path=["counties"], - meta=["state", "shortname", ["info", "governor"]], - errors="ignore", - ) - - ex_data = { - "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"], - "population": [12345, 40000, 60000, 1234, 1337], - "state": ["Florida"] * 3 + ["Ohio"] * 2, - "shortname": ["FL"] * 3 + ["OH"] * 2, - "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2, - } - - expected = DataFrame(ex_data) - tm.assert_frame_equal(result, expected) - - def test_meta_name_conflict(self): - data = [ - { - "foo": "hello", - "bar": "there", - "data": [ - {"foo": "something", "bar": "else"}, - {"foo": "something2", "bar": "else2"}, - ], - } - ] - - msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix" - with pytest.raises(ValueError, match=msg): - json_normalize(data, "data", meta=["foo", "bar"]) - - result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta") - - for val in ["metafoo", "metabar", "foo", "bar"]: - assert val in result - - def test_meta_parameter_not_modified(self): - # GH 18610 - data = [ - { - "foo": "hello", - "bar": "there", - "data": [ - {"foo": "something", "bar": "else"}, - {"foo": "something2", "bar": "else2"}, - ], - } - ] - - COLUMNS = ["foo", "bar"] - result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta") - - assert COLUMNS == ["foo", "bar"] - for val in ["metafoo", "metabar", "foo", "bar"]: - assert val in result - - def test_record_prefix(self, state_data): - result = json_normalize(state_data[0], "counties") - expected = DataFrame(state_data[0]["counties"]) - tm.assert_frame_equal(result, expected) - - result = json_normalize( - state_data, "counties", meta="state", record_prefix="county_" - ) - - expected = [] - for rec in state_data: - expected.extend(rec["counties"]) - expected = DataFrame(expected) - expected = expected.rename(columns=lambda x: "county_" + x) - expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2]) - - 
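        # record_prefix only renames columns that came from the record_path
        # ("county_name", "county_population"); the meta column "state" keeps
        # its original name.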
tm.assert_frame_equal(result, expected) - - def test_non_ascii_key(self): - testjson = ( - b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' - b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]' - ).decode("utf8") - - testdata = { - b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1], - "sub.A": [1, 3], - "sub.B": [2, 4], - } - expected = DataFrame(testdata) - - result = json_normalize(json.loads(testjson)) - tm.assert_frame_equal(result, expected) - - def test_missing_field(self, author_missing_data): - # GH20030: - result = json_normalize(author_missing_data) - ex_data = [ - { - "info": np.nan, - "info.created_at": np.nan, - "info.last_updated": np.nan, - "author_name.first": np.nan, - "author_name.last_name": np.nan, - }, - { - "info": None, - "info.created_at": "11/08/1993", - "info.last_updated": "26/05/2012", - "author_name.first": "Jane", - "author_name.last_name": "Doe", - }, - ] - expected = DataFrame(ex_data) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "max_level,expected", - [ - ( - 0, - [ - { - "TextField": "Some text", - "UserField": {"Id": "ID001", "Name": "Name001"}, - "CreatedBy": {"Name": "User001"}, - "Image": {"a": "b"}, - }, - { - "TextField": "Some text", - "UserField": {"Id": "ID001", "Name": "Name001"}, - "CreatedBy": {"Name": "User001"}, - "Image": {"a": "b"}, - }, - ], - ), - ( - 1, - [ - { - "TextField": "Some text", - "UserField.Id": "ID001", - "UserField.Name": "Name001", - "CreatedBy": {"Name": "User001"}, - "Image": {"a": "b"}, - }, - { - "TextField": "Some text", - "UserField.Id": "ID001", - "UserField.Name": "Name001", - "CreatedBy": {"Name": "User001"}, - "Image": {"a": "b"}, - }, - ], - ), - ], - ) - def test_max_level_with_records_path(self, max_level, expected): - # GH23843: Enhanced JSON normalize - test_input = [ - { - "CreatedBy": {"Name": "User001"}, - "Lookup": [ - { - "TextField": "Some text", - "UserField": {"Id": "ID001", "Name": "Name001"}, - }, - { - "TextField": "Some text", - "UserField": {"Id": "ID001", "Name": "Name001"}, - }, - ], - "Image": {"a": "b"}, - "tags": [ - {"foo": "something", "bar": "else"}, - {"foo": "something2", "bar": "else2"}, - ], - } - ] - - result = json_normalize( - test_input, - record_path=["Lookup"], - meta=[["CreatedBy"], ["Image"]], - max_level=max_level, - ) - expected_df = DataFrame(data=expected, columns=result.columns.values) - tm.assert_equal(expected_df, result) - - def test_nested_flattening_consistent(self): - # see gh-21537 - df1 = json_normalize([{"A": {"B": 1}}]) - df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy") - - # They should be the same. - tm.assert_frame_equal(df1, df2) - - def test_nonetype_record_path(self, nulls_fixture): - # see gh-30148 - # should not raise TypeError - result = json_normalize( - [ - {"state": "Texas", "info": nulls_fixture}, - {"state": "Florida", "info": [{"i": 2}]}, - ], - record_path=["info"], - ) - expected = DataFrame({"i": 2}, index=[0]) - tm.assert_equal(result, expected) - - @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"']) - def test_non_list_record_path_errors(self, value): - # see gh-30148, GH 26284 - parsed_value = json.loads(value) - test_input = {"state": "Texas", "info": parsed_value} - test_path = "info" - msg = ( - f"{test_input} has non list value {parsed_value} for path {test_path}. " - "Must be list or null." 
- ) - with pytest.raises(TypeError, match=msg): - json_normalize([test_input], record_path=[test_path]) - - def test_meta_non_iterable(self): - # GH 31507 - data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]""" - - result = json_normalize(json.loads(data), record_path=["data"], meta=["id"]) - expected = DataFrame( - {"one": [1], "two": [2], "id": np.array([99], dtype=object)} - ) - tm.assert_frame_equal(result, expected) - - def test_generator(self, state_data): - # GH35923 Fix pd.json_normalize to not skip the first element of a - # generator input - def generator_data(): - yield from state_data[0]["counties"] - - result = json_normalize(generator_data()) - expected = DataFrame(state_data[0]["counties"]) - - tm.assert_frame_equal(result, expected) - - def test_top_column_with_leading_underscore(self): - # 49861 - data = {"_id": {"a1": 10, "l2": {"l3": 0}}, "gg": 4} - result = json_normalize(data, sep="_") - expected = DataFrame([[4, 10, 0]], columns=["gg", "_id_a1", "_id_l2_l3"]) - - tm.assert_frame_equal(result, expected) - - -class TestNestedToRecord: - def test_flat_stays_flat(self): - recs = [{"flat1": 1, "flat2": 2}, {"flat3": 3, "flat2": 4}] - result = nested_to_record(recs) - expected = recs - assert result == expected - - def test_one_level_deep_flattens(self): - data = {"flat1": 1, "dict1": {"c": 1, "d": 2}} - - result = nested_to_record(data) - expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1} - - assert result == expected - - def test_nested_flattens(self): - data = { - "flat1": 1, - "dict1": {"c": 1, "d": 2}, - "nested": {"e": {"c": 1, "d": 2}, "d": 2}, - } - - result = nested_to_record(data) - expected = { - "dict1.c": 1, - "dict1.d": 2, - "flat1": 1, - "nested.d": 2, - "nested.e.c": 1, - "nested.e.d": 2, - } - - assert result == expected - - def test_json_normalize_errors(self, missing_metadata): - # GH14583: - # If meta keys are not always present a new option to set - # errors='ignore' has been implemented - - msg = ( - "Key 'name' not found. 
To replace missing values of " - "'name' with np.nan, pass in errors='ignore'" - ) - with pytest.raises(KeyError, match=msg): - json_normalize( - data=missing_metadata, - record_path="addresses", - meta="name", - errors="raise", - ) - - def test_missing_meta(self, missing_metadata): - # GH25468 - # If metadata is nullable with errors set to ignore, the null values - # should be numpy.nan values - result = json_normalize( - data=missing_metadata, record_path="addresses", meta="name", errors="ignore" - ) - ex_data = [ - [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"], - [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan], - ] - columns = ["number", "street", "city", "state", "zip", "name"] - expected = DataFrame(ex_data, columns=columns) - tm.assert_frame_equal(result, expected) - - def test_missing_nested_meta(self): - # GH44312 - # If errors="ignore" and nested metadata is null, we should return nan - data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]} - result = json_normalize( - data, - record_path="value", - meta=["meta", ["nested_meta", "leaf"]], - errors="ignore", - ) - ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]] - columns = ["rec", "meta", "nested_meta.leaf"] - expected = DataFrame(ex_data, columns=columns).astype( - {"nested_meta.leaf": object} - ) - tm.assert_frame_equal(result, expected) - - # If errors="raise" and nested metadata is null, we should raise with the - # key of the first missing level - with pytest.raises(KeyError, match="'leaf' not found"): - json_normalize( - data, - record_path="value", - meta=["meta", ["nested_meta", "leaf"]], - errors="raise", - ) - - def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata): - # GH41876 - # Ensure errors='raise' works as intended even when a record_path of length - # greater than one is passed in - msg = ( - "Key 'name' not found. 
To replace missing values of " - "'name' with np.nan, pass in errors='ignore'" - ) - with pytest.raises(KeyError, match=msg): - json_normalize( - data=missing_metadata, - record_path=["previous_residences", "cities"], - meta="name", - errors="raise", - ) - - def test_missing_meta_multilevel_record_path_errors_ignore(self, missing_metadata): - # GH41876 - # Ensure errors='ignore' works as intended even when a record_path of length - # greater than one is passed in - result = json_normalize( - data=missing_metadata, - record_path=["previous_residences", "cities"], - meta="name", - errors="ignore", - ) - ex_data = [ - ["Foo York City", "Alice"], - ["Barmingham", np.nan], - ] - columns = ["city_name", "name"] - expected = DataFrame(ex_data, columns=columns) - tm.assert_frame_equal(result, expected) - - def test_donot_drop_nonevalues(self): - # GH21356 - data = [ - {"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}}, - { - "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"}, - "author_name": {"first": "Jane", "last_name": "Doe"}, - }, - ] - result = nested_to_record(data) - expected = [ - { - "info": None, - "author_name.first": "Smith", - "author_name.last_name": "Appleseed", - }, - { - "author_name.first": "Jane", - "author_name.last_name": "Doe", - "info.created_at": "11/08/1993", - "info.last_updated": "26/05/2012", - }, - ] - - assert result == expected - - def test_nonetype_top_level_bottom_level(self): - # GH21158: If inner level json has a key with a null value - # make sure it does not do a new_d.pop twice and except - data = { - "id": None, - "location": { - "country": { - "state": { - "id": None, - "town.info": { - "id": None, - "region": None, - "x": 49.151580810546875, - "y": -33.148521423339844, - "z": 27.572303771972656, - }, - } - } - }, - } - result = nested_to_record(data) - expected = { - "id": None, - "location.country.state.id": None, - "location.country.state.town.info.id": None, - "location.country.state.town.info.region": None, - "location.country.state.town.info.x": 49.151580810546875, - "location.country.state.town.info.y": -33.148521423339844, - "location.country.state.town.info.z": 27.572303771972656, - } - assert result == expected - - def test_nonetype_multiple_levels(self): - # GH21158: If inner level json has a key with a null value - # make sure it does not do a new_d.pop twice and except - data = { - "id": None, - "location": { - "id": None, - "country": { - "id": None, - "state": { - "id": None, - "town.info": { - "region": None, - "x": 49.151580810546875, - "y": -33.148521423339844, - "z": 27.572303771972656, - }, - }, - }, - }, - } - result = nested_to_record(data) - expected = { - "id": None, - "location.id": None, - "location.country.id": None, - "location.country.state.id": None, - "location.country.state.town.info.region": None, - "location.country.state.town.info.x": 49.151580810546875, - "location.country.state.town.info.y": -33.148521423339844, - "location.country.state.town.info.z": 27.572303771972656, - } - assert result == expected - - @pytest.mark.parametrize( - "max_level, expected", - [ - ( - None, - [ - { - "CreatedBy.Name": "User001", - "Lookup.TextField": "Some text", - "Lookup.UserField.Id": "ID001", - "Lookup.UserField.Name": "Name001", - "Image.a": "b", - } - ], - ), - ( - 0, - [ - { - "CreatedBy": {"Name": "User001"}, - "Lookup": { - "TextField": "Some text", - "UserField": {"Id": "ID001", "Name": "Name001"}, - }, - "Image": {"a": "b"}, - } - ], - ), - ( - 1, - [ - { - "CreatedBy.Name": "User001", - 
"Lookup.TextField": "Some text", - "Lookup.UserField": {"Id": "ID001", "Name": "Name001"}, - "Image.a": "b", - } - ], - ), - ], - ) - def test_with_max_level(self, max_level, expected, max_level_test_input_data): - # GH23843: Enhanced JSON normalize - output = nested_to_record(max_level_test_input_data, max_level=max_level) - assert output == expected - - def test_with_large_max_level(self): - # GH23843: Enhanced JSON normalize - max_level = 100 - input_data = [ - { - "CreatedBy": { - "user": { - "name": {"firstname": "Leo", "LastName": "Thomson"}, - "family_tree": { - "father": { - "name": "Father001", - "father": { - "Name": "Father002", - "father": { - "name": "Father003", - "father": {"Name": "Father004"}, - }, - }, - } - }, - } - } - } - ] - expected = [ - { - "CreatedBy.user.name.firstname": "Leo", - "CreatedBy.user.name.LastName": "Thomson", - "CreatedBy.user.family_tree.father.name": "Father001", - "CreatedBy.user.family_tree.father.father.Name": "Father002", - "CreatedBy.user.family_tree.father.father.father.name": "Father003", - "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501 - } - ] - output = nested_to_record(input_data, max_level=max_level) - assert output == expected - - def test_series_non_zero_index(self): - # GH 19020 - data = { - 0: {"id": 1, "name": "Foo", "elements": {"a": 1}}, - 1: {"id": 2, "name": "Bar", "elements": {"b": 2}}, - 2: {"id": 3, "name": "Baz", "elements": {"c": 3}}, - } - s = Series(data) - s.index = [1, 2, 3] - result = json_normalize(s) - expected = DataFrame( - { - "id": [1, 2, 3], - "name": ["Foo", "Bar", "Baz"], - "elements.a": [1.0, np.nan, np.nan], - "elements.b": [np.nan, 2.0, np.nan], - "elements.c": [np.nan, np.nan, 3.0], - } - ) - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/test_textreader.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/test_textreader.py deleted file mode 100644 index f150ed3903443c28140356afdeda16e85fd04a0e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/test_textreader.py +++ /dev/null @@ -1,343 +0,0 @@ -""" -Tests the TextReader class in parsers.pyx, which -is integral to the C engine in parsers.py -""" -from io import ( - BytesIO, - StringIO, -) - -import numpy as np -import pytest - -import pandas._libs.parsers as parser -from pandas._libs.parsers import TextReader - -from pandas import DataFrame -import pandas._testing as tm - -from pandas.io.parsers import ( - TextFileReader, - read_csv, -) -from pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs - - -class TestTextReader: - @pytest.fixture - def csv_path(self, datapath): - return datapath("io", "data", "csv", "test1.csv") - - def test_file_handle(self, csv_path): - with open(csv_path, "rb") as f: - reader = TextReader(f) - reader.read() - - def test_file_handle_mmap(self, csv_path): - # this was never using memory_map=True - with open(csv_path, "rb") as f: - reader = TextReader(f, header=None) - reader.read() - - def test_StringIO(self, csv_path): - with open(csv_path, "rb") as f: - text = f.read() - src = BytesIO(text) - reader = TextReader(src, header=None) - reader.read() - - def test_string_factorize(self): - # should this be optional? 
- data = "a\nb\na\nb\na" - reader = TextReader(StringIO(data), header=None) - result = reader.read() - assert len(set(map(id, result[0]))) == 2 - - def test_skipinitialspace(self): - data = "a, b\na, b\na, b\na, b" - - reader = TextReader(StringIO(data), skipinitialspace=True, header=None) - result = reader.read() - - tm.assert_numpy_array_equal( - result[0], np.array(["a", "a", "a", "a"], dtype=np.object_) - ) - tm.assert_numpy_array_equal( - result[1], np.array(["b", "b", "b", "b"], dtype=np.object_) - ) - - def test_parse_booleans(self): - data = "True\nFalse\nTrue\nTrue" - - reader = TextReader(StringIO(data), header=None) - result = reader.read() - - assert result[0].dtype == np.bool_ - - def test_delimit_whitespace(self): - data = 'a b\na\t\t "b"\n"a"\t \t b' - - reader = TextReader(StringIO(data), delim_whitespace=True, header=None) - result = reader.read() - - tm.assert_numpy_array_equal( - result[0], np.array(["a", "a", "a"], dtype=np.object_) - ) - tm.assert_numpy_array_equal( - result[1], np.array(["b", "b", "b"], dtype=np.object_) - ) - - def test_embedded_newline(self): - data = 'a\n"hello\nthere"\nthis' - - reader = TextReader(StringIO(data), header=None) - result = reader.read() - - expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_) - tm.assert_numpy_array_equal(result[0], expected) - - def test_euro_decimal(self): - data = "12345,67\n345,678" - - reader = TextReader(StringIO(data), delimiter=":", decimal=",", header=None) - result = reader.read() - - expected = np.array([12345.67, 345.678]) - tm.assert_almost_equal(result[0], expected) - - def test_integer_thousands(self): - data = "123,456\n12,500" - - reader = TextReader(StringIO(data), delimiter=":", thousands=",", header=None) - result = reader.read() - - expected = np.array([123456, 12500], dtype=np.int64) - tm.assert_almost_equal(result[0], expected) - - def test_integer_thousands_alt(self): - data = "123.456\n12.500" - - reader = TextFileReader( - StringIO(data), delimiter=":", thousands=".", header=None - ) - result = reader.read() - - expected = DataFrame([123456, 12500]) - tm.assert_frame_equal(result, expected) - - def test_skip_bad_lines(self, capsys): - # too many lines, see #2430 for why - data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r" - - reader = TextReader(StringIO(data), delimiter=":", header=None) - msg = r"Error tokenizing data\. 
C error: Expected 3 fields in line 4, saw 4" - with pytest.raises(parser.ParserError, match=msg): - reader.read() - - reader = TextReader( - StringIO(data), delimiter=":", header=None, on_bad_lines=2 # Skip - ) - result = reader.read() - expected = { - 0: np.array(["a", "d", "g", "l"], dtype=object), - 1: np.array(["b", "e", "h", "m"], dtype=object), - 2: np.array(["c", "f", "i", "n"], dtype=object), - } - assert_array_dicts_equal(result, expected) - - reader = TextReader( - StringIO(data), delimiter=":", header=None, on_bad_lines=1 # Warn - ) - reader.read() - captured = capsys.readouterr() - - assert "Skipping line 4" in captured.err - assert "Skipping line 6" in captured.err - - def test_header_not_enough_lines(self): - data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6" - - reader = TextReader(StringIO(data), delimiter=",", header=2) - header = reader.header - expected = [["a", "b", "c"]] - assert header == expected - - recs = reader.read() - expected = { - 0: np.array([1, 4], dtype=np.int64), - 1: np.array([2, 5], dtype=np.int64), - 2: np.array([3, 6], dtype=np.int64), - } - assert_array_dicts_equal(recs, expected) - - def test_escapechar(self): - data = '\\"hello world"\n\\"hello world"\n\\"hello world"' - - reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\") - result = reader.read() - expected = {0: np.array(['"hello world"'] * 3, dtype=object)} - assert_array_dicts_equal(result, expected) - - def test_eof_has_eol(self): - # handling of new line at EOF - pass - - def test_na_substitution(self): - pass - - def test_numpy_string_dtype(self): - data = """\ -a,1 -aa,2 -aaa,3 -aaaa,4 -aaaaa,5""" - - def _make_reader(**kwds): - if "dtype" in kwds: - kwds["dtype"] = ensure_dtype_objs(kwds["dtype"]) - return TextReader(StringIO(data), delimiter=",", header=None, **kwds) - - reader = _make_reader(dtype="S5,i4") - result = reader.read() - - assert result[0].dtype == "S5" - - ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5") - assert (result[0] == ex_values).all() - assert result[1].dtype == "i4" - - reader = _make_reader(dtype="S4") - result = reader.read() - assert result[0].dtype == "S4" - ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4") - assert (result[0] == ex_values).all() - assert result[1].dtype == "S4" - - def test_pass_dtype(self): - data = """\ -one,two -1,a -2,b -3,c -4,d""" - - def _make_reader(**kwds): - if "dtype" in kwds: - kwds["dtype"] = ensure_dtype_objs(kwds["dtype"]) - return TextReader(StringIO(data), delimiter=",", **kwds) - - reader = _make_reader(dtype={"one": "u1", 1: "S1"}) - result = reader.read() - assert result[0].dtype == "u1" - assert result[1].dtype == "S1" - - reader = _make_reader(dtype={"one": np.uint8, 1: object}) - result = reader.read() - assert result[0].dtype == "u1" - assert result[1].dtype == "O" - - reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")}) - result = reader.read() - assert result[0].dtype == "u1" - assert result[1].dtype == "O" - - def test_usecols(self): - data = """\ -a,b,c -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - - def _make_reader(**kwds): - return TextReader(StringIO(data), delimiter=",", **kwds) - - reader = _make_reader(usecols=(1, 2)) - result = reader.read() - - exp = _make_reader().read() - assert len(result) == 2 - assert (result[1] == exp[1]).all() - assert (result[2] == exp[2]).all() - - @pytest.mark.parametrize( - "text, kwargs", - [ - ("a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12", {"delimiter": ","}), - ( - "a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12", - 
{"delim_whitespace": True}, - ), - ("a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12", {"delimiter": ","}), - ( - ( - "A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r" - "AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r" - ",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0" - ), - {"delimiter": ","}, - ), - ("A B C\r 2 3\r4 5 6", {"delim_whitespace": True}), - ("A B C\r2 3\r4 5 6", {"delim_whitespace": True}), - ], - ) - def test_cr_delimited(self, text, kwargs): - nice_text = text.replace("\r", "\r\n") - result = TextReader(StringIO(text), **kwargs).read() - expected = TextReader(StringIO(nice_text), **kwargs).read() - assert_array_dicts_equal(result, expected) - - def test_empty_field_eof(self): - data = "a,b,c\n1,2,3\n4,," - - result = TextReader(StringIO(data), delimiter=",").read() - - expected = { - 0: np.array([1, 4], dtype=np.int64), - 1: np.array(["2", ""], dtype=object), - 2: np.array(["3", ""], dtype=object), - } - assert_array_dicts_equal(result, expected) - - # GH5664 - a = DataFrame([["b"], [np.nan]], columns=["a"], index=["a", "c"]) - b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1]) - c = DataFrame( - [ - [1, 2, 3, 4], - [6, np.nan, np.nan, np.nan], - [8, 9, 10, 11], - [13, 14, np.nan, np.nan], - ], - columns=list("abcd"), - index=[0, 5, 7, 12], - ) - - for _ in range(100): - df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c") - tm.assert_frame_equal(df, a) - - df = read_csv( - StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c" - ) - tm.assert_frame_equal(df, b) - - df = read_csv( - StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"), - names=list("abcd"), - engine="c", - ) - tm.assert_frame_equal(df, c) - - def test_empty_csv_input(self): - # GH14867 - with read_csv( - StringIO(), chunksize=20, header=None, names=["a", "b", "c"] - ) as df: - assert isinstance(df, TextFileReader) - - -def assert_array_dicts_equal(left, right): - for k, v in left.items(): - tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k])) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/sas/test_sas7bdat.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/sas/test_sas7bdat.py deleted file mode 100644 index d56139d32b1dade66ba0a6e15fd15ddac835551d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/sas/test_sas7bdat.py +++ /dev/null @@ -1,399 +0,0 @@ -import contextlib -from datetime import datetime -import io -import os -from pathlib import Path - -import dateutil.parser -import numpy as np -import pytest - -from pandas.errors import EmptyDataError -import pandas.util._test_decorators as td - -import pandas as pd -import pandas._testing as tm - - -@pytest.fixture -def dirpath(datapath): - return datapath("io", "sas", "data") - - -@pytest.fixture(params=[(1, range(1, 16)), (2, [16])]) -def data_test_ix(request, dirpath): - i, test_ix = request.param - fname = os.path.join(dirpath, f"test_sas7bdat_{i}.csv") - df = pd.read_csv(fname) - epoch = datetime(1960, 1, 1) - t1 = pd.to_timedelta(df["Column4"], unit="d") - df["Column4"] = epoch + t1 - t2 = pd.to_timedelta(df["Column12"], unit="d") - df["Column12"] = epoch + t2 - for k in range(df.shape[1]): - col = df.iloc[:, k] - if col.dtype == np.int64: - df.isetitem(k, df.iloc[:, k].astype(np.float64)) - return df, test_ix - - -# https://github.com/cython/cython/issues/1720 -class TestSAS7BDAT: - @pytest.mark.slow - def test_from_file(self, dirpath, data_test_ix): - df0, test_ix = 
data_test_ix - for k in test_ix: - fname = os.path.join(dirpath, f"test{k}.sas7bdat") - df = pd.read_sas(fname, encoding="utf-8") - tm.assert_frame_equal(df, df0) - - @pytest.mark.slow - def test_from_buffer(self, dirpath, data_test_ix): - df0, test_ix = data_test_ix - for k in test_ix: - fname = os.path.join(dirpath, f"test{k}.sas7bdat") - with open(fname, "rb") as f: - byts = f.read() - buf = io.BytesIO(byts) - with pd.read_sas( - buf, format="sas7bdat", iterator=True, encoding="utf-8" - ) as rdr: - df = rdr.read() - tm.assert_frame_equal(df, df0, check_exact=False) - - @pytest.mark.slow - def test_from_iterator(self, dirpath, data_test_ix): - df0, test_ix = data_test_ix - for k in test_ix: - fname = os.path.join(dirpath, f"test{k}.sas7bdat") - with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr: - df = rdr.read(2) - tm.assert_frame_equal(df, df0.iloc[0:2, :]) - df = rdr.read(3) - tm.assert_frame_equal(df, df0.iloc[2:5, :]) - - @pytest.mark.slow - def test_path_pathlib(self, dirpath, data_test_ix): - df0, test_ix = data_test_ix - for k in test_ix: - fname = Path(os.path.join(dirpath, f"test{k}.sas7bdat")) - df = pd.read_sas(fname, encoding="utf-8") - tm.assert_frame_equal(df, df0) - - @td.skip_if_no("py.path") - @pytest.mark.slow - def test_path_localpath(self, dirpath, data_test_ix): - from py.path import local as LocalPath - - df0, test_ix = data_test_ix - for k in test_ix: - fname = LocalPath(os.path.join(dirpath, f"test{k}.sas7bdat")) - df = pd.read_sas(fname, encoding="utf-8") - tm.assert_frame_equal(df, df0) - - @pytest.mark.slow - @pytest.mark.parametrize("chunksize", (3, 5, 10, 11)) - @pytest.mark.parametrize("k", range(1, 17)) - def test_iterator_loop(self, dirpath, k, chunksize): - # github #13654 - fname = os.path.join(dirpath, f"test{k}.sas7bdat") - with pd.read_sas(fname, chunksize=chunksize, encoding="utf-8") as rdr: - y = 0 - for x in rdr: - y += x.shape[0] - assert y == rdr.row_count - - def test_iterator_read_too_much(self, dirpath): - # github #14734 - fname = os.path.join(dirpath, "test1.sas7bdat") - with pd.read_sas( - fname, format="sas7bdat", iterator=True, encoding="utf-8" - ) as rdr: - d1 = rdr.read(rdr.row_count + 20) - - with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr: - d2 = rdr.read(rdr.row_count + 20) - tm.assert_frame_equal(d1, d2) - - -def test_encoding_options(datapath): - fname = datapath("io", "sas", "data", "test1.sas7bdat") - df1 = pd.read_sas(fname) - df2 = pd.read_sas(fname, encoding="utf-8") - for col in df1.columns: - try: - df1[col] = df1[col].str.decode("utf-8") - except AttributeError: - pass - tm.assert_frame_equal(df1, df2) - - from pandas.io.sas.sas7bdat import SAS7BDATReader - - with contextlib.closing(SAS7BDATReader(fname, convert_header_text=False)) as rdr: - df3 = rdr.read() - for x, y in zip(df1.columns, df3.columns): - assert x == y.decode() - - -def test_encoding_infer(datapath): - fname = datapath("io", "sas", "data", "test1.sas7bdat") - - with pd.read_sas(fname, encoding="infer", iterator=True) as df1_reader: - # check: is encoding inferred correctly from file - assert df1_reader.inferred_encoding == "cp1252" - df1 = df1_reader.read() - - with pd.read_sas(fname, encoding="cp1252", iterator=True) as df2_reader: - df2 = df2_reader.read() - - # check: reader reads correct information - tm.assert_frame_equal(df1, df2) - - -def test_productsales(datapath): - fname = datapath("io", "sas", "data", "productsales.sas7bdat") - df = pd.read_sas(fname, encoding="utf-8") - fname = datapath("io", "sas", "data", 
"productsales.csv") - df0 = pd.read_csv(fname, parse_dates=["MONTH"]) - vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"] - df0[vn] = df0[vn].astype(np.float64) - tm.assert_frame_equal(df, df0) - - -def test_12659(datapath): - fname = datapath("io", "sas", "data", "test_12659.sas7bdat") - df = pd.read_sas(fname) - fname = datapath("io", "sas", "data", "test_12659.csv") - df0 = pd.read_csv(fname) - df0 = df0.astype(np.float64) - tm.assert_frame_equal(df, df0) - - -def test_airline(datapath): - fname = datapath("io", "sas", "data", "airline.sas7bdat") - df = pd.read_sas(fname) - fname = datapath("io", "sas", "data", "airline.csv") - df0 = pd.read_csv(fname) - df0 = df0.astype(np.float64) - tm.assert_frame_equal(df, df0, check_exact=False) - - -def test_date_time(datapath): - # Support of different SAS date/datetime formats (PR #15871) - fname = datapath("io", "sas", "data", "datetime.sas7bdat") - df = pd.read_sas(fname) - fname = datapath("io", "sas", "data", "datetime.csv") - df0 = pd.read_csv( - fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"] - ) - # GH 19732: Timestamps imported from sas will incur floating point errors - df[df.columns[3]] = df.iloc[:, 3].dt.round("us") - tm.assert_frame_equal(df, df0) - - -@pytest.mark.parametrize("column", ["WGT", "CYL"]) -def test_compact_numerical_values(datapath, column): - # Regression test for #21616 - fname = datapath("io", "sas", "data", "cars.sas7bdat") - df = pd.read_sas(fname, encoding="latin-1") - # The two columns CYL and WGT in cars.sas7bdat have column - # width < 8 and only contain integral values. - # Test that pandas doesn't corrupt the numbers by adding - # decimals. - result = df[column] - expected = df[column].round() - tm.assert_series_equal(result, expected, check_exact=True) - - -def test_many_columns(datapath): - # Test for looking for column information in more places (PR #22628) - fname = datapath("io", "sas", "data", "many_columns.sas7bdat") - - df = pd.read_sas(fname, encoding="latin-1") - - fname = datapath("io", "sas", "data", "many_columns.csv") - df0 = pd.read_csv(fname, encoding="latin-1") - tm.assert_frame_equal(df, df0) - - -def test_inconsistent_number_of_rows(datapath): - # Regression test for issue #16615. 
(PR #22628) - fname = datapath("io", "sas", "data", "load_log.sas7bdat") - df = pd.read_sas(fname, encoding="latin-1") - assert len(df) == 2097 - - -def test_zero_variables(datapath): - # Check if the SAS file has zero variables (PR #18184) - fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") - with pytest.raises(EmptyDataError, match="No columns to parse from file"): - pd.read_sas(fname) - - -def test_zero_rows(datapath): - # GH 18198 - fname = datapath("io", "sas", "data", "zero_rows.sas7bdat") - result = pd.read_sas(fname) - expected = pd.DataFrame([{"char_field": "a", "num_field": 1.0}]).iloc[:0] - tm.assert_frame_equal(result, expected) - - -def test_corrupt_read(datapath): - # We don't really care about the exact failure, the important thing is - # that the resource should be cleaned up afterwards (BUG #35566) - fname = datapath("io", "sas", "data", "corrupt.sas7bdat") - msg = "'SAS7BDATReader' object has no attribute 'row_count'" - with pytest.raises(AttributeError, match=msg): - pd.read_sas(fname) - - -def round_datetime_to_ms(ts): - if isinstance(ts, datetime): - return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000) - elif isinstance(ts, str): - _ts = dateutil.parser.parse(timestr=ts) - return _ts.replace(microsecond=int(round(_ts.microsecond, -3) / 1000) * 1000) - else: - return ts - - -def test_max_sas_date(datapath): - # GH 20927 - # NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999 - # but this is read as 29DEC9999:23:59:59.998993 by a buggy - # sas7bdat module - fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") - df = pd.read_sas(fname, encoding="iso-8859-1") - - # SAS likes to left pad strings with spaces - lstrip before comparing - df = df.map(lambda x: x.lstrip() if isinstance(x, str) else x) - # GH 19732: Timestamps imported from sas will incur floating point errors - try: - df["dt_as_dt"] = df["dt_as_dt"].dt.round("us") - except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime: - df = df.map(round_datetime_to_ms) - except AttributeError: - df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) - # if there are any date/times > pandas.Timestamp.max then ALL in that chunk - # are returned as datetime.datetime - expected = pd.DataFrame( - { - "text": ["max", "normal"], - "dt_as_float": [253717747199.999, 1880323199.999], - "dt_as_dt": [ - datetime(9999, 12, 29, 23, 59, 59, 999000), - datetime(2019, 8, 1, 23, 59, 59, 999000), - ], - "date_as_float": [2936547.0, 21762.0], - "date_as_date": [datetime(9999, 12, 29), datetime(2019, 8, 1)], - }, - columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"], - ) - tm.assert_frame_equal(df, expected) - - -def test_max_sas_date_iterator(datapath): - # GH 20927 - # when called as an iterator, only those chunks with a date > pd.Timestamp.max - # are returned as datetime.datetime, if this happens that whole chunk is returned - # as datetime.datetime - col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"] - fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") - results = [] - for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1): - # SAS likes to left pad strings with spaces - lstrip before comparing - df = df.map(lambda x: x.lstrip() if isinstance(x, str) else x) - # GH 19732: Timestamps imported from sas will incur floating point errors - try: - df["dt_as_dt"] = df["dt_as_dt"].dt.round("us") - except pd._libs.tslibs.np_datetime.OutOfBoundsDatetime: - df = df.map(round_datetime_to_ms) - except AttributeError: - 
df["dt_as_dt"] = df["dt_as_dt"].apply(round_datetime_to_ms) - df.reset_index(inplace=True, drop=True) - results.append(df) - expected = [ - pd.DataFrame( - { - "text": ["max"], - "dt_as_float": [253717747199.999], - "dt_as_dt": [datetime(9999, 12, 29, 23, 59, 59, 999000)], - "date_as_float": [2936547.0], - "date_as_date": [datetime(9999, 12, 29)], - }, - columns=col_order, - ), - pd.DataFrame( - { - "text": ["normal"], - "dt_as_float": [1880323199.999], - "dt_as_dt": [np.datetime64("2019-08-01 23:59:59.999")], - "date_as_float": [21762.0], - "date_as_date": [np.datetime64("2019-08-01")], - }, - columns=col_order, - ), - ] - for result, expected in zip(results, expected): - tm.assert_frame_equal(result, expected) - - -def test_null_date(datapath): - fname = datapath("io", "sas", "data", "dates_null.sas7bdat") - df = pd.read_sas(fname, encoding="utf-8") - - expected = pd.DataFrame( - { - "datecol": [ - datetime(9999, 12, 29), - pd.NaT, - ], - "datetimecol": [ - datetime(9999, 12, 29, 23, 59, 59, 998993), - pd.NaT, - ], - }, - ) - tm.assert_frame_equal(df, expected) - - -def test_meta2_page(datapath): - # GH 35545 - fname = datapath("io", "sas", "data", "test_meta2_page.sas7bdat") - df = pd.read_sas(fname) - assert len(df) == 1000 - - -@pytest.mark.parametrize( - "test_file, override_offset, override_value, expected_msg", - [ - ("test2.sas7bdat", 0x10000 + 55229, 0x80 | 0x0F, "Out of bounds"), - ("test2.sas7bdat", 0x10000 + 55229, 0x10, "unknown control byte"), - ("test3.sas7bdat", 118170, 184, "Out of bounds"), - ], -) -def test_rle_rdc_exceptions( - datapath, test_file, override_offset, override_value, expected_msg -): - """Errors in RLE/RDC decompression should propagate.""" - with open(datapath("io", "sas", "data", test_file), "rb") as fd: - data = bytearray(fd.read()) - data[override_offset] = override_value - with pytest.raises(Exception, match=expected_msg): - pd.read_sas(io.BytesIO(data), format="sas7bdat") - - -def test_0x40_control_byte(datapath): - # GH 31243 - fname = datapath("io", "sas", "data", "0x40controlbyte.sas7bdat") - df = pd.read_sas(fname, encoding="ascii") - fname = datapath("io", "sas", "data", "0x40controlbyte.csv") - df0 = pd.read_csv(fname, dtype="object") - tm.assert_frame_equal(df, df0) - - -def test_0x00_control_byte(datapath): - # GH 47099 - fname = datapath("io", "sas", "data", "0x00controlbyte.sas7bdat.bz2") - df = next(pd.read_sas(fname, chunksize=11_000)) - assert df.shape == (11_000, 20) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/comal.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/comal.py deleted file mode 100644 index 666595503faf849ff96d6d5eee7724bd5d971412..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/comal.py +++ /dev/null @@ -1,80 +0,0 @@ -""" - pygments.lexers.comal - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for COMAL-80. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import RegexLexer, words -from pygments.token import Comment, Whitespace, Operator, Keyword, String, \ - Number, Name, Punctuation - -__all__ = ["Comal80Lexer"] - - -class Comal80Lexer(RegexLexer): - """ - For COMAL-80 source code. - """ - - name = 'COMAL-80' - url = 'https://en.wikipedia.org/wiki/COMAL' - aliases = ['comal', 'comal80'] - filenames = ['*.cml', '*.comal'] - flags = re.IGNORECASE - # - # COMAL allows for some strange characters in names which we list here so - # keywords and word operators will not be recognized at the start of an - # identifier. - # - _suffix = r"\b(?!['\[\]←£\\])" - _identifier = r"[a-z]['\[\]←£\\\w]*" - - tokens = { - 'root': [ - (r'//.*\n', Comment.Single), - (r'\s+', Whitespace), - (r':[=+-]|\<\>|[-+*/^↑<>=]', Operator), - (r'(and +then|or +else)' + _suffix, Operator.Word), - (words([ - 'and', 'bitand', 'bitor', 'bitxor', 'div', 'in', 'mod', 'not', - 'or'], suffix=_suffix,), Operator.Word), - (words([ - 'append', 'at', 'case', 'chain', 'close', 'copy', 'create', 'cursor', - 'data', 'delete', 'dir', 'do', 'elif', 'else', 'end', 'endcase', 'endif', - 'endfor', 'endloop', 'endtrap', 'endwhile', 'exec', 'exit', 'file', - 'for', 'goto', 'handler', 'if', 'input', 'let', 'loop', 'mount', 'null', - 'of', 'open', 'otherwise', 'output', 'page', 'pass', 'poke', 'print', - 'random', 'read', 'repeat', 'report', 'return', 'rename', 'restore', - 'select', 'step', 'stop', 'sys', 'then', 'to', 'trap', 'unit', 'unit$', - 'until', 'using', 'when', 'while', 'write', 'zone'], suffix=_suffix), - Keyword.Reserved), - (words([ - 'closed', 'dim', 'endfunc', 'endproc', 'external', 'func', 'import', - 'proc', 'ref', 'use'], suffix=_suffix), Keyword.Declaration), - (words([ - 'abs', 'atn', 'chr$', 'cos', 'eod', 'eof', 'err', 'errfile', 'errtext', - 'esc', 'exp', 'int', 'key$', 'len', 'log', 'ord', 'peek', 'randomize', - 'rnd', 'sgn', 'sin', 'spc$', 'sqr', 'status$', 'str$', 'tab', 'tan', - 'time', 'val'], suffix=_suffix), Name.Builtin), - (words(['false', 'pi', 'true'], suffix=_suffix), Keyword.Constant), - (r'"', String, 'string'), - (_identifier + r":(?=[ \n/])", Name.Label), - (_identifier + r"[$#]?", Name), - (r'%[01]+', Number.Bin), - (r'\$[0-9a-f]+', Number.Hex), - (r'\d*\.\d*(e[-+]?\d+)?', Number.Float), - (r'\d+', Number.Integer), - (r'[(),:;]', Punctuation), - ], - 'string': [ - (r'[^"]+', String), - (r'"[0-9]*"', String.Escape), - (r'"', String, '#pop'), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/websockets/uri.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/websockets/uri.py deleted file mode 100644 index 385090f66ae36def8bca11e311803f2bec4ad558..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/websockets/uri.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import annotations - -import dataclasses -import urllib.parse -from typing import Optional, Tuple - -from . import exceptions - - -__all__ = ["parse_uri", "WebSocketURI"] - - -@dataclasses.dataclass -class WebSocketURI: - """ - WebSocket URI. - - Attributes: - secure: :obj:`True` for a ``wss`` URI, :obj:`False` for a ``ws`` URI. - host: Normalized to lower case. - port: Always set even if it's the default. - path: May be empty. - query: May be empty if the URI doesn't include a query component. - username: Available when the URI contains `User Information`_. - password: Available when the URI contains `User Information`_. - - .. 
_User Information: https://www.rfc-editor.org/rfc/rfc3986.html#section-3.2.1 - - """ - - secure: bool - host: str - port: int - path: str - query: str - username: Optional[str] = None - password: Optional[str] = None - - @property - def resource_name(self) -> str: - if self.path: - resource_name = self.path - else: - resource_name = "/" - if self.query: - resource_name += "?" + self.query - return resource_name - - @property - def user_info(self) -> Optional[Tuple[str, str]]: - if self.username is None: - return None - assert self.password is not None - return (self.username, self.password) - - -# All characters from the gen-delims and sub-delims sets in RFC 3987. -DELIMS = ":/?#[]@!$&'()*+,;=" - - -def parse_uri(uri: str) -> WebSocketURI: - """ - Parse and validate a WebSocket URI. - - Args: - uri: WebSocket URI. - - Returns: - WebSocketURI: Parsed WebSocket URI. - - Raises: - InvalidURI: if ``uri`` isn't a valid WebSocket URI. - - """ - parsed = urllib.parse.urlparse(uri) - if parsed.scheme not in ["ws", "wss"]: - raise exceptions.InvalidURI(uri, "scheme isn't ws or wss") - if parsed.hostname is None: - raise exceptions.InvalidURI(uri, "hostname isn't provided") - if parsed.fragment != "": - raise exceptions.InvalidURI(uri, "fragment identifier is meaningless") - - secure = parsed.scheme == "wss" - host = parsed.hostname - port = parsed.port or (443 if secure else 80) - path = parsed.path - query = parsed.query - username = parsed.username - password = parsed.password - # urllib.parse.urlparse accepts URLs with a username but without a - # password. This doesn't make sense for HTTP Basic Auth credentials. - if username is not None and password is None: - raise exceptions.InvalidURI(uri, "username provided without password") - - try: - uri.encode("ascii") - except UnicodeEncodeError: - # Input contains non-ASCII characters. - # It must be an IRI. Convert it to a URI. 
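-        # (IDNA encoding handles the non-ASCII hostname below; percent-encoding,
-        # with the RFC 3987 DELIMS kept safe, covers the path, query, and userinfo)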
- host = host.encode("idna").decode() - path = urllib.parse.quote(path, safe=DELIMS) - query = urllib.parse.quote(query, safe=DELIMS) - if username is not None: - assert password is not None - username = urllib.parse.quote(username, safe=DELIMS) - password = urllib.parse.quote(password, safe=DELIMS) - - return WebSocketURI(secure, host, port, path, query, username, password) diff --git a/spaces/pyodide-demo/self-hosted/imageio.js b/spaces/pyodide-demo/self-hosted/imageio.js deleted file mode 100644 index 709e6c9ca019d4ef2b8401f4eebf3212faf2b35b..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/imageio.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="imageio.data";var REMOTE_PACKAGE_BASE="imageio.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... 
("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","imageio",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/imageio","core",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/imageio","plugins",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/imageio","resources",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/imageio/resources","images",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","imageio-2.9.0-py3.9.egg-info",true,true);Module["FS_createPath"]("/","bin",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var 
compressedData={data:null,cachedOffset:3847286,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[/* several thousand integer byte offsets for the packed imageio.data payload elided; the array is truncated mid-entry in the source */
1,3309809,3311857,3313905,3315953,3318001,3320049,3322097,3324145,3326193,3328241,3330289,3332337,3334385,3336433,3338481,3340529,3342577,3344625,3346673,3348721,3350769,3352817,3354865,3356913,3358961,3361009,3363057,3365105,3367153,3369201,3371249,3373297,3375345,3377393,3379441,3381489,3383537,3385585,3387633,3389681,3391729,3393777,3395825,3397873,3399921,3401969,3404017,3406065,3408113,3410161,3412209,3414257,3416305,3418353,3420401,3422449,3424497,3426545,3428593,3430641,3432689,3434737,3436785,3438833,3440881,3442929,3444977,3447025,3449073,3451121,3453169,3455217,3457265,3459313,3461361,3463409,3465457,3467505,3469553,3471601,3473649,3475697,3477745,3479793,3481841,3483889,3485937,3487985,3490033,3492081,3494129,3496177,3498225,3500273,3502321,3504369,3506417,3508465,3510513,3512561,3514609,3516657,3518705,3520753,3522801,3524849,3526897,3528945,3530993,3533041,3535089,3537137,3539185,3541233,3543281,3545329,3547377,3549425,3551473,3553521,3555569,3557617,3559665,3561713,3563761,3565809,3567857,3569905,3571953,3574001,3576049,3578097,3580145,3582193,3584241,3586289,3588337,3590385,3592433,3594481,3596529,3598577,3600625,3602673,3604721,3606769,3608817,3610865,3612913,3614961,3617009,3619057,3621105,3623153,3625201,3627249,3629297,3631345,3633393,3635441,3637489,3639537,3641585,3643633,3645681,3647729,3649777,3651825,3653873,3655921,3657969,3660017,3662065,3664113,3666161,3668209,3670257,3672305,3674353,3676401,3678449,3680497,3682545,3684593,3686641,3688689,3690737,3692785,3694833,3696881,3698929,3700977,3703025,3705073,3707121,3709169,3711217,3713265,3715313,3717361,3719409,3721457,3723505,3725553,3727601,3729649,3731697,3733745,3735793,3737841,3739889,3741937,3743985,3746033,3748081,3750129,3752177,3754225,3756273,3758321,3760369,3762417,3764465,3766513,3768561,3770609,3772657,3774705,3776753,3778801,3780849,3782897,3784945,3786993,3789041,3791089,3793137,3795185,3797233,3799281,3801329,3803377,3805425,3807473,3809521,3811569,3813617,3815665,3817713,3819761,3821809,3823857,3825905,3827953,3830001,3832049,3834097,3836145,3838193,3840241,3842289,3844337,3845576,3846587],sizes:[1445,1230,1274,1410,1226,1405,1202,1173,1337,1496,1253,1314,1452,1376,1068,879,1035,1202,1063,983,1244,1205,1190,1177,789,1272,1587,1366,1243,1192,1508,1363,1280,1493,1369,1527,1470,1205,844,1057,1065,1205,1183,1138,1184,1121,1429,1071,1360,1316,1118,1016,1310,1242,1279,1529,1271,1492,1457,1211,1020,934,1151,748,1137,1202,977,1195,999,890,1092,1170,1267,1084,1243,1368,1078,1121,1041,1147,1100,1173,1172,1087,1076,1323,1182,1343,1348,1405,1304,1e3,1207,1098,1144,1159,907,884,1186,1208,1091,1128,966,1087,959,934,958,1083,1175,1284,1095,952,1003,926,1231,1666,1235,1273,1380,1001,1167,1129,1088,1276,1218,1379,1428,1475,1713,1526,1407,1488,1431,1489,1502,1403,1356,1364,1206,1371,1293,986,1292,1258,1262,1194,1014,933,1075,729,942,1072,943,1023,944,944,1116,831,886,1152,948,1154,1455,1104,1120,1239,1085,954,1025,1119,1070,1072,976,1001,799,1054,1095,1108,1178,1011,926,976,821,1201,1033,1041,1120,1387,1217,908,683,940,1231,1066,1092,953,1099,990,1053,1203,1011,1075,804,811,767,1199,1158,1163,1137,1208,1213,986,1152,1140,1124,1297,840,1132,1125,1075,1090,948,1064,818,859,1085,1056,1030,1124,1036,1059,964,1092,1099,1018,1016,789,1083,804,841,1109,1061,916,850,856,955,814,818,781,738,865,1008,785,906,910,870,1222,1223,1004,883,1152,1111,1051,1076,1134,1066,1015,1144,999,1108,1286,1187,1133,1082,1154,1143,1110,1322,1368,1205,1354,1333,1019,1287,1266,1272,1301,1172,926,1224,1319,1304,1322,1132,1196,1222,1049,1401,1208,1132,
989,1298,1016,1189,1093,975,1205,1241,1303,1088,961,1159,1482,1477,1307,1022,827,799,1374,1250,1147,1336,1237,1436,1478,1369,1300,1092,1076,1138,1178,1237,1105,1191,938,1346,1257,1471,1324,1413,1085,1346,1258,1272,1283,1223,1353,1432,1219,1196,1348,1332,1026,1188,1328,1109,1458,1283,1038,1220,710,1029,1110,1316,1183,921,1051,1090,1410,1162,1435,1050,1147,1136,1369,1385,1095,1333,1211,1154,1430,1152,1287,1342,1294,1316,1391,1379,1404,1356,1324,1327,1466,1363,1256,1345,1424,1390,1147,1314,1346,1359,1232,1410,1399,1220,1255,1126,1319,1374,1249,1123,1273,1133,1310,1295,1270,871,1084,1062,1526,1229,1059,1033,1091,1127,1475,1320,1110,1349,1105,1261,1691,2044,1967,1907,1929,2005,1903,1904,1921,1904,1918,1866,1942,1987,2033,2002,1959,1974,2013,2001,1949,1918,1965,1980,1957,1998,2008,2003,1988,1955,1994,2034,2022,1888,1969,1983,1982,1994,1973,2021,2026,1990,1982,2041,2036,2009,2033,2016,1981,2053,1977,1983,2044,1986,1955,2003,2014,2053,2034,1988,1997,2035,1951,1956,2024,2036,2009,2006,1972,2010,2025,1991,1961,2029,2016,1977,2022,2e3,2009,2022,1990,1987,2033,2028,1965,1978,2013,2022,1985,1952,1968,2029,2011,1971,1999,1977,1987,1969,1943,1978,2018,1955,1930,1998,1996,1937,1924,1955,1979,1964,1922,1926,2007,1995,1907,1940,1970,1983,1892,1935,1921,1890,1931,1854,1835,1933,1920,1834,1914,1962,1923,1931,1897,1946,1976,1934,1925,1953,1975,1936,1971,1964,1953,1942,1922,1932,1888,1949,1896,1774,1879,1912,1850,1859,1902,1844,1879,1860,1809,1902,1924,1832,1894,1898,1847,1901,1905,1863,1936,1948,1854,1946,1958,1904,1919,1935,1891,1974,1938,1923,1962,1986,1923,1901,1961,1976,1968,1990,1990,1997,1997,2007,1993,2020,2050,1995,1980,2008,2015,1994,2025,2020,2023,2047,2040,2022,2038,2037,2014,2003,1976,1867,1937,1912,1763,1876,1817,1564,1819,1798,1539,1753,1715,1355,1681,1648,1324,1690,1673,1318,1663,1649,1286,1675,1649,1265,1647,1646,1246,1631,1657,1259,1624,1638,1240,1666,1648,1242,1658,1651,1260,1656,1686,1357,1738,1737,1455,1759,1787,1575,1805,1833,1646,1811,1803,1601,1781,1785,1615,1774,1857,1693,1786,1824,1667,1770,1848,1764,1821,1945,1894,1853,1924,1855,1790,1863,1729,1736,1846,1642,1697,1792,1521,1667,1713,1356,1575,1637,1293,1568,1650,1293,1595,1661,1337,1595,1663,1319,1620,1675,1357,1655,1708,1412,1644,1722,1494,1748,1793,1588,1763,1808,1619,1764,1791,1612,1745,1792,1558,1749,1787,1621,1734,1781,1682,1768,1836,1701,1753,1807,1665,1706,1777,1635,1751,1839,1681,1694,1791,1621,1600,1745,1560,1535,1606,1490,1508,1659,1543,1490,1685,1544,1423,1654,1528,1428,1612,1430,1172,1481,1360,1062,1472,1375,1046,1477,1395,1052,1503,1433,1054,1530,1520,1094,1551,1575,1598,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,
2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2010,2048,2048,2048,1996,2048,1994,2048,2048,1894,2048,2048,2048,1995,1889,2048,1994,1996,1890,1789,1990,1987,1884,1989,1995,1888,1995,2048,1882,1995,1995,1887,1990,1889,1994,2048,1988,1887,1990,1887,1992,1819,1966,1996,1896,1991,1888,1993,1993,1949,2020,1992,1893,1992,2048,1989,1888,1989,1887,1991,1993,2048,1885,1995,1993,1895,1992,2048,1891,1998,2048,1988,1889,1997,2048,1889,1994,2048,1991,2035,1956,1993,2048,1889,1992,2048,1988,1892,2048,1989,1989,1895,1991,2048,1885,1996,2048,1991,2048,1888,2048,1989,2048,1892,2048,1991,1992,2048,1889,1997,2048,1886,1991,1993,1887,2048,1887,2048,1882,1990,1890,1991,1776,1994,1890,2048,1989,1993,2048,1889,2048,1990,1894,2048,1991,2048,1989,1950,2021,1991,1991,1995,1994,1992,1890,2048,1989,1990,2048,1883,1992,1883,1995,1997,2048,1891,1994,2048,1883,1990,1991,1891,1993,1994,1888,1990,1893,1991,1992,1884,1996,2048,1780,1995,1887,2048,1992,1890,2048,1988,1990,1888,2048,1988,1990,2048,1883,1992,1894,2048,1993,2048,2048,1989,2048,1885,1787,1990,1992,2048,1889,1991,1889,2e3,2048,1990,2048,1889,2048,1988,1991,2048,1884,2048,1990,1891,1995,1993,1893,2048,1990,1895,2048,1990,2048,1988,1894,2048,1991,1889,1997,2048,1988,1886,1992,1991,1894,2048,1988,1883,1992,2034,1857,1803,1994,1993,1803,1896,1995,1989,1889,1994,1990,1885,1990,1889,1995,1996,2048,1891,1994,1888,2048,1903,2048,1782,1890,1995,1802,2045,1926,1778,1896,1992,1789,2048,1800,2048,1989,1995,1889,2048,1988,1893,2046,2017,1994,2048,1883,1994,2048,1988,1887,1996,2048,1887,1990,1995,1892,1993,1887,1989,1993,1887,1992,1889,1989,1989,1886,1991,1987,1883,1788,2048,1882,1890,2048,1989,1893,1992,1993,1896,1998,2048,1888,1995,2048,1992,2048,1888,2048,1989,2048,1990,1896,2048,1989,1896,1992,2048,1993,1892,2048,1991,1896,2048,1989,2048,1987,1894,2048,1991,1337,1955,1142,1198,1946,2048,2048,2048,2048,2048,2048,2048,1996,2048,2048,2048,2048,2048,2048,2048,2008,2048,2048,2048,2048,2048,2048,2048,1966,2048,2048,2048,2048,2048,2048,2048,1963,2048,2048,2048,2048,2048,2048,2048,1979,2048,2048,2048,2048,2048,2048,2048,1993,2048,2048,2048,2048,2048,2048,2048,1966,2048,2048,2048,2048,2048,2048,2048,1976,2048,2048,2048,2048,2048,2048,2048,1972,2048,2048,2048,2048,2048,2048,2048,2003,2048,2048,2048,2048,2048,2048,1959,2048,2048,2048,2048,2048,2048,2048,1976,2048,2048,2048,2048,2048,2048,2048,1978,2048,2048,2048,2048,2048,2048,2048,1968,2048,2048,2048,2048,2048,2048,2048,1958,2048,2048,2048,2048,2048,2048,2048,1959,2048,2048,2048,2048,2048,2048,2048,1965,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1966,2048,2048,2048,2048,2048,2048,2048,1994,2048,2048,2048,2048,2048,2048,2048,1977,2048,2048,2048,2048,2048,2048,2048,1951,2048,2048,2048,2048,2048,2048,2048,1947,2048,2048,2048,2048,2048,2048,2048,1965,2048,2048,2048,2048,2048,2048,2048,1959,2048,2048,2048,2048,2048,2048,2048,1992,2048,2048,2048,2048,2048,2048,2048,1993,2048,2048,2048,2048,2048,2048,2048,1971,2048,2048,2048,2048,2048,2048,2048,1957,2048,2048,2048,2048,2048,2048,1965,2048,2048,2048,2048,2048,2048,2048,1954,2048,2048,2048,2048,2048,2048,2048,1991,2048,2048,2048,2048,2048,2048,2048,1992,2048,2048,2048,2048,2048,2048,2048,1985,2048,2048,2048,2048,2048,2048,2048,1986,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1967,2054,2054,2056,2055,2057,2048,2048,2048,1847,1832,2048,2
048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1239,1011,699],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,1,0,0,0,1,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,0,1,1,0,1,1,1,0,1,1,0,1,1,1,1,0,1,1,0,1,1,0,1,1,1,1,0,1,1,0,1,0,1,0,1,0,1,0,1,1,0,1,1,0,1,1,1,1,0,1,0,1,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,0,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,1,1,1,0,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,0,1,1,0,1,1,1,0,1,1,0,1,1,1,0,1,0,0,1,0,1,1,1,1,0,1,1,1,1,0,1,0,1,0,1,1,0,1,0,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,0,1,0,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,1,0,1,1,1,1,1,0,1,1,0,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,1,1,0,1,1,1,0,1,1,0,1,1,0,1,0,1,1,0,1,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_imageio.data")}Module["addRunDependency"]("datafile_imageio.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/imageio/__init__.py",start:0,end:1273,audio:0},{filename:"/lib/python3.9/site-packages/imageio/__main__.py",start:1273,end:6672,audio:0},{filename:"/lib/python3.9/site-packages/imageio/freeze.py",start:6672,end:6842,audio:0},{filename:"/lib/python3.9/site-packages/imageio/testing.py",start:6842,end:10261,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/__init__.py",start:10261,end:10900,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/fetching.py",start:10900,end:20087,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/findlib.py",start:20087,end:25631,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/format.py",start:25631,end:51635,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/functions.py",start:51635,end:72856,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/request.py",start:72856,end:94046,audio:0},{filename:"/lib/python3.9/site-packages/imageio/core/util.py",start:94046,end:112709,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/__init__.py",start:112709,end:116383,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/_bsdf.py",start:116383,end:149354,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/_dicom.py",start:149354,end:183289,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/_freeimage.py",start:183289,end:235123,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/_swf.py",start:235123,end:260881,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/_tifffile.py",start:260881,end:628281,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/bsdf.py",start:628281,end:639703,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/dicom.py",start:639703,end:651893,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/example.py",start:651893,end:657589,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/feisem.py",start:657589,end:660981,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/ffmpeg.py",start:660981,end:689628,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/fits.py",start:689628,end:694403,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/freeimage.py",start:694403,end:713290,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/freeimagemulti.py",start:713290,end:725035,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/gdal.py",start:725035,end:726746,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/grab.py",start:726746,end:729996,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/lytro.py",start:729996,end:754704,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/npz.py",start:754704,end:757826,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/pillow.py",start:757826,end:790828,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/pillow_info.py",start:790828,end:828227,
audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/pillowmulti.py",start:828227,end:840798,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/simpleitk.py",start:840798,end:845147,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/spe.py",start:845147,end:860588,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/swf.py",start:860588,end:872883,audio:0},{filename:"/lib/python3.9/site-packages/imageio/plugins/tifffile.py",start:872883,end:884319,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/shipped_resources_go_here",start:884319,end:884319,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/astronaut.png",start:884319,end:1675874,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/chelsea.png",start:1675874,end:1897168,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/chelsea.zip",start:1897168,end:2118486,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/cockatoo.mp4",start:2118486,end:2847237,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/newtonscradle.gif",start:2847237,end:3430611,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/realshort.mp4",start:3430611,end:3527433,audio:0},{filename:"/lib/python3.9/site-packages/imageio/resources/images/stent.npz",start:3527433,end:4352045,audio:0},{filename:"/lib/python3.9/site-packages/imageio-2.9.0-py3.9.egg-info/PKG-INFO",start:4352045,end:4354297,audio:0},{filename:"/lib/python3.9/site-packages/imageio-2.9.0-py3.9.egg-info/dependency_links.txt",start:4354297,end:4354298,audio:0},{filename:"/lib/python3.9/site-packages/imageio-2.9.0-py3.9.egg-info/entry_points.txt",start:4354298,end:4354429,audio:0},{filename:"/lib/python3.9/site-packages/imageio-2.9.0-py3.9.egg-info/requires.txt",start:4354429,end:4354547,audio:0},{filename:"/lib/python3.9/site-packages/imageio-2.9.0-py3.9.egg-info/top_level.txt",start:4354547,end:4354555,audio:0},{filename:"/lib/python3.9/site-packages/imageio-2.9.0-py3.9.egg-info/SOURCES.txt",start:4354555,end:4355990,audio:0},{filename:"/bin/imageio_download_bin",start:4355990,end:4356979,audio:0},{filename:"/bin/imageio_remove_bin",start:4356979,end:4357964,audio:0}],remote_package_size:3851382,package_uuid:"41bc991e-008d-41a2-83ce-fe83b9bbabb9"})})(); \ No newline at end of file diff --git a/spaces/pyodide-demo/self-hosted/matplotlib.js b/spaces/pyodide-demo/self-hosted/matplotlib.js deleted file mode 100644 index 6803286b8cb962bf92b581c79da0bd609e31b6ff..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/matplotlib.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="matplotlib.data";var REMOTE_PACKAGE_BASE="matplotlib.data";if(typeof 
Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... ("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new 
Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","matplotlib",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","axes",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","backends",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","cbook",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","compat",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","projections",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","sphinxext",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","style",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","testing",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/testing","jpl_units",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","tri",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib","mpl-data",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data","fonts",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data/fonts","ttf",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data/fonts","afm",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data/fonts","pdfcorefonts",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data","images",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data","stylelib",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data","sample_data",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data","axes_grid",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","mpl_toolkits",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/mpl_toolkits","axes_grid",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/mpl_toolkits","axes_grid1",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/mpl_toolkits","axisartist",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages/mpl_toolkits","mplot3d",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","matplotlib-3.3.3-py3.9.egg-info",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var 
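// The compressedData record that follows is the LZ4 chunk index for the package
// payload: `offsets` gives each compressed chunk's byte offset in the .data file,
// `sizes` its compressed length, and `successes` records whether LZ4 actually
// shrank that chunk (entries of size 2048 with success 0 appear to be stored raw);
// cachedIndexes/cachedChunks form a small two-slot cache of recently decompressed
// chunks. Reading back a chunk is then roughly, assuming a hypothetical
// lz4Decompress:
//   const CHUNK = 2048;                       // chunk size implied by the sizes array
//   const i = Math.floor(pos / CHUNK);
//   const raw = data.subarray(offsets[i], offsets[i] + sizes[i]);
//   const chunk = successes[i] ? lz4Decompress(raw, CHUNK) : raw;
// Module.LZ4.loadPackage consumes this index so files can be decompressed lazily,
// chunk by chunk, on first read instead of all at once.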
compressedData={data:null,cachedOffset:11509674,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1320,2779,4196,5551,6872,8028,9120,10291,11561,12849,14090,14840,16129,17473,18629,19901,21340,22684,23778,25240,26520,27681,28976,30313,31404,32756,33466,34518,35748,36630,38093,39087,39839,40583,41366,42181,42973,43796,44538,45445,46316,47071,47758,48544,49362,50103,50876,51622,52426,53263,54002,54794,56133,57260,58266,59235,60066,61081,61978,62887,63866,64866,65897,66883,67908,68875,69833,70829,71800,72783,73756,74724,75667,76700,77662,78684,79672,80647,81616,82633,83596,84540,85430,86411,87420,88330,89275,90205,91113,92065,93054,94279,95887,97499,99119,100708,102309,103907,105514,107149,108771,110394,112053,113639,115239,116847,118450,120040,121645,122670,123618,124584,125536,126476,127555,128936,130149,131357,132593,133799,135023,136247,137427,138632,139820,141015,142214,143417,144607,145637,146625,147690,149093,150524,151953,153166,154446,155698,156815,157709,158611,159678,160745,161796,162741,164079,165542,166557,167499,168441,169267,170115,170963,172106,173101,173887,174782,175976,176542,177124,177691,178351,178923,179618,180245,180871,181489,182417,183397,184360,185361,186214,187076,187957,188898,189832,190729,191669,192521,193413,194285,195226,195970,196663,197492,198744,199827,201140,202786,204119,205323,206717,207886,209098,210053,210969,212471,213807,215102,216305,217688,219009,220258,221568,222827,224125,225499,226831,228038,229407,230589,231795,233044,234311,235627,236756,238104,239301,240540,241684,242923,244129,245419,246655,247994,249272,250536,251731,252831,254172,255394,256656,257916,258946,260046,261264,262391,263629,264791,265842,267008,268040,269144,270214,271382,272565,273814,275092,276452,277750,279058,279961,281093,282488,283900,285265,286275,287081,288190,289299,290309,291302,292281,293296,294445,295607,296763,298003,299183,300075,301214,302232,303431,304358,305551,306712,307652,308495,309685,310469,311568,312488,313590,314604,315545,316718,317913,319010,320161,321234,322319,323403,324453,325317,326391,327602,328835,329758,330724,331603,332952,334016,335240,336443,337691,338726,339893,341137,342166,343336,344555,345764,346924,348123,349293,350500,351694,352805,354138,355301,356546,357757,358926,360055,361438,362850,364263,365699,366928,368214,369313,370256,371009,372043,373280,374453,375666,376928,377792,378844,379994,381270,382419,383561,384362,385276,386572,387661,388961,390304,391555,392739,393935,395012,396093,397356,398400,399638,400885,402060,403122,404306,405557,406731,407820,408929,410217,411259,412358,413679,414969,416208,417267,418330,419447,420372,421268,422348,423452,424392,425383,426652,427661,428764,430002,431188,432577,433999,435196,436330,437622,438833,440071,441326,442504,443709,444995,446286,447252,448296,449448,450830,452086,453170,454407,455811,456826,458171,459523,460833,461964,463105,464083,465509,466934,467942,469263,470360,471466,472701,473793,474833,476072,477324,478278,479434,480556,481659,482776,483850,485064,486431,487631,488867,490020,491212,492400,493630,494813,496062,497157,498186,499219,500137,501255,502518,503699,505005,506283,507386,508518,509838,510919,512137,513473,514984,516063,517473,518560,519701,520922,521832,522957,524176,525224,526412,527609,528644,529556,530720,531617,532799,534155,535348,536651,537965,539202,540019,541263,542550,543691,544992,546188,547658,549132,550407,551667,552682,554029,555464,556923,558376,559640,560928,561830,562999,564169,565359,566622,567830,569129,570249,571347,572496,573737,574988,576096,57
7389,578588,579868,580970,582193,583472,584752,586083,587268,588453,589697,590857,591901,593208,594519,595618,596809,598170,599494,600625,601871,603026,604433,605702,606788,608056,609320,610515,611921,613210,614376,615569,616654,617766,618922,620046,621266,622381,623646,624741,625928,627133,628299,629426,630449,631693,633032,634246,635511,636811,638347,639852,641170,642519,643919,645306,646624,647609,648919,650107,651417,652489,653679,654911,656207,657306,658395,659529,660725,661825,663111,664296,665524,666897,667941,669145,670432,671512,672246,673497,674600,675727,676914,678209,679727,680794,682067,683402,684589,685698,686634,687592,689020,690307,691678,692700,694114,695507,696860,698139,699236,700605,702072,703446,704855,706107,706992,708210,709387,710711,712067,713313,714347,715547,716623,717702,718683,719917,721176,722102,723008,724259,725521,726750,727961,729246,730467,731707,732985,734137,735391,736642,737684,738771,739719,740954,742125,743275,744471,745729,747069,748444,749489,750813,751937,752895,754076,755188,756369,757594,758826,760027,761231,762354,763703,765223,766324,767541,768863,770190,771406,772567,773783,775012,776136,777555,778748,779778,780881,781929,783167,784634,785832,787021,787951,789269,790627,791635,793062,794202,795100,796259,797753,798851,799850,800906,802058,803250,804479,805742,806738,807809,809036,810180,811462,812507,813716,814836,815909,816591,817440,818572,819936,821212,822401,823662,824947,826202,827405,828605,829707,830761,831858,833068,834307,835622,836906,837986,839437,840589,841754,842973,844188,845384,846679,847866,849079,850235,851620,852986,854459,855789,857124,858225,859639,860801,862046,863369,864559,865772,866808,867997,869186,870281,871422,872575,873726,874700,875931,877054,877993,879067,880058,881206,882400,883589,884710,885916,887160,888149,889219,890213,891245,892200,893109,893979,894793,895845,896920,898073,899184,900596,901978,903085,904324,905356,906073,907160,908382,909481,910702,911920,912961,914092,915006,915962,916817,917574,918675,919794,920727,921699,922408,923455,924799,925996,926888,928085,928872,930017,931044,931989,933088,934290,935100,935956,936696,937482,938146,938776,939704,941089,942479,943548,944657,945591,946405,947704,948912,950099,951149,952386,953493,954198,955405,956508,957652,958679,959792,961161,962293,963309,964742,965576,966740,967990,968979,970181,971329,972408,973538,974674,975897,976806,978125,979450,980306,981226,982386,983430,984433,985491,986307,987255,988476,989551,990705,991828,992853,993838,994765,995859,996864,997824,998691,999900,1001008,1002308,1003247,1004476,1005748,1006934,1008072,1009308,1010588,1011940,1013172,1014435,1015822,1017206,1018503,1019851,1020844,1022007,1023346,1024614,1025764,1027016,1028467,1029664,1030931,1032134,1033039,1034282,1035580,1036712,1037830,1039022,1040124,1041249,1042297,1043407,1044599,1045793,1046972,1048069,1049188,1050362,1051469,1052699,1053687,1054798,1055795,1056951,1058162,1059107,1060079,1061207,1062344,1063506,1064620,1065587,1066474,1067543,1068688,1069892,1071229,1072370,1073544,1074288,1075411,1076333,1077431,1078730,1079815,1081018,1082101,1083301,1084321,1085485,1086787,1088179,1089433,1090537,1091770,1093127,1094420,1095637,1096435,1097318,1098310,1099270,1100235,1100883,1102154,1103263,1104361,1105325,1106392,1107124,1108242,1109525,1110855,1112001,1113093,1113970,1114706,1115239,1116175,1116966,1117606,1118700,1119393,1120329,1121255,1122359,1123514,1124579,1125299,1126582,1127676,1128701,1129688,1130675,1131702,1132680,1133875,1134909,1135977,1137245,113
8513,1139777,1140935,1141932,1143138,1144437,1145624,1146692,1147916,1148953,1150125,1151324,1152432,1153335,1154293,1155190,1156553,1157829,1159193,1160491,1161513,1162666,1163935,1165173,1166405,1167882,1169185,1170548,1171677,1173016,1174231,1175394,1176384,1177593,1178803,1180116,1181369,1182639,1183793,1185120,1186487,1187887,1189192,1190544,1191977,1193237,1194427,1195778,1197102,1198281,1199569,1200738,1201907,1203164,1204366,1205712,1207056,1208068,1209205,1210334,1211542,1213033,1214335,1215771,1217025,1217794,1218386,1219342,1220019,1220928,1221745,1222722,1223419,1224376,1225147,1226100,1227008,1227865,1228775,1229538,1229929,1230343,1231202,1232674,1234173,1235512,1236762,1238e3,1239005,1240295,1241714,1242997,1244349,1245319,1246537,1247802,1249046,1250506,1251890,1253430,1254721,1256019,1257371,1258655,1259797,1260972,1262252,1263478,1264613,1265739,1266856,1267921,1269108,1270340,1271641,1272714,1273732,1274942,1276139,1277336,1278665,1279989,1281343,1282475,1283208,1284082,1285235,1286387,1287383,1288111,1289061,1290358,1291477,1292792,1294186,1295322,1296584,1297563,1298414,1299090,1300367,1301624,1302796,1303726,1304734,1305508,1306120,1306879,1307965,1309012,1310164,1311374,1312660,1313814,1315062,1316321,1317548,1318773,1319847,1321029,1322104,1323441,1324752,1326011,1327196,1328375,1329398,1330404,1331328,1332451,1333471,1334571,1335636,1337106,1338256,1339594,1340904,1342082,1343324,1344505,1345586,1346864,1348003,1349535,1350730,1351943,1353285,1354426,1355576,1356849,1358061,1359306,1360416,1361549,1362749,1363771,1365057,1366313,1367345,1368805,1369853,1370962,1372298,1373636,1374693,1376006,1377329,1378663,1379861,1381048,1382187,1383423,1384480,1385481,1386576,1387621,1388596,1389716,1390706,1391287,1392431,1393619,1394674,1395623,1396509,1397706,1398934,1399949,1400963,1402005,1403145,1404199,1405301,1406020,1407035,1408154,1409272,1410439,1411603,1412759,1413941,1414953,1416059,1417281,1418607,1419824,1421144,1422507,1423735,1424789,1426068,1427280,1428466,1429755,1430913,1431848,1433094,1434115,1435208,1436391,1437517,1438774,1440072,1441244,1442454,1443563,1444803,1445847,1446956,1448115,1449332,1450624,1451928,1452977,1454475,1455858,1457026,1458210,1459341,1460497,1461761,1463049,1464115,1465261,1466496,1467483,1468698,1469831,1470969,1472224,1473354,1474575,1475811,1476925,1477941,1479232,1480354,1481653,1482973,1484360,1485507,1486419,1487553,1488848,1489868,1491286,1492730,1493988,1495272,1496449,1497047,1497949,1498958,1500145,1501283,1502477,1503641,1505027,1506075,1506977,1507720,1508818,1509887,1510478,1511706,1513040,1514365,1515572,1516510,1517593,1518717,1519995,1521018,1522104,1523308,1524297,1524964,1525826,1526741,1527745,1528870,1530092,1531158,1532103,1533305,1534378,1535284,1536429,1537600,1538575,1539943,1540821,1542330,1543604,1544899,1545966,1547124,1548183,1549485,1550708,1551923,1553182,1554597,1555861,1557059,1558229,1559330,1560305,1561087,1562183,1563416,1564612,1565898,1567029,1568142,1569253,1570418,1571553,1572789,1574028,1575080,1575835,1576893,1578042,1578975,1580041,1581148,1581895,1583126,1584346,1585237,1586261,1587564,1588736,1589813,1590742,1591796,1592732,1593678,1594794,1595944,1597324,1598634,1599782,1600888,1601851,1603154,1604432,1605602,1606877,1607914,1609113,1610669,1612113,1613646,1615074,1616384,1618044,1619476,1620973,1621871,1622696,1623542,1624681,1626141,1627651,1628999,1630260,1631905,1633126,1634400,1635759,1636163,1636188,1636213,1636999,1638252,1639033,1640084,1641101,1642141,1643569,1644705,1646266,1647591
,1648896,1650276,1651584,1653189,1654566,1655787,1657051,1658505,1660115,1661736,1663149,1664331,1665820,1667433,1668501,1669889,1671189,1672137,1673672,1674810,1676162,1677582,1678434,1678459,1678484,1678509,1679523,1680695,1681661,1682693,1683245,1683809,1684359,1684905,1685429,1685992,1686544,1687243,1688037,1688824,1689427,1690046,1690651,1691260,1691905,1692523,1693108,1693732,1694355,1694951,1695661,1697154,1698815,1700049,1701098,1701605,1702857,1704274,1705810,1706699,1707749,1708665,1709958,1711373,1712529,1713891,1715318,1716524,1717640,1718598,1719982,1720642,1721898,1722983,1723925,1725184,1726046,1727191,1728492,1729207,1730428,1731546,1732495,1733894,1734550,1735822,1736934,1737888,1739341,1740815,1741808,1742652,1742900,1743445,1744673,1746195,1747189,1748346,1749524,1750713,1751997,1753577,1754923,1756288,1757488,1758165,1759492,1760881,1762262,1762906,1764320,1765590,1766970,1768009,1769314,1770476,1771855,1773319,1774090,1775352,1776780,1778192,1779017,1780321,1781693,1783054,1783609,1784917,1786328,1787745,1788298,1789783,1791043,1792501,1793288,1794782,1796440,1797536,1798879,1800317,1801595,1801620,1801645,1801842,1803157,1804394,1805456,1806449,1807195,1808037,1808711,1810083,1811573,1812908,1814347,1815106,1815718,1817252,1818692,1820129,1821074,1822095,1823064,1824470,1825911,1827283,1828784,1830140,1831521,1832764,1834173,1835644,1836987,1838436,1840016,1841440,1842930,1844295,1845866,1847281,1848755,1850052,1851356,1852758,1853673,1855059,1856267,1857266,1858418,1859818,1861155,1862608,1864102,1865581,1866851,1868429,1870030,1871363,1872608,1873814,1875268,1875522,1875547,1875572,1876775,1878397,1879861,1881230,1882642,1883971,1885658,1887357,1888923,1890451,1892040,1893346,1894890,1896533,1898080,1899665,1901336,1902882,1904479,1906129,1907687,1909173,1910740,1912026,1913304,1914656,1916040,1917347,1918539,1919371,1920259,1920992,1921932,1922508,1923079,1923683,1924236,1924756,1925455,1926032,1926635,1927031,1927549,1928019,1928521,1929174,1930183,1930838,1931403,1932182,1933768,1935250,1936695,1938190,1939743,1941163,1942363,1943051,1943785,1945237,1946729,1948066,1949351,1951025,1952491,1954155,1955637,1957030,1958530,1960102,1961303,1962841,1964278,1965734,1967307,1968905,1970436,1971904,1973538,1975128,1976736,1978400,1979885,1981425,1982972,1984652,1986274,1987917,1989612,1991301,1992777,1994397,1995948,1997514,1999108,2000581,2002125,2003646,2005233,2006885,2008351,2010015,2011638,2013263,2014834,2016561,2018272,2019819,2021295,2022876,2024574,2026162,2027770,2029132,2030677,2032385,2034077,2035679,2037264,2038738,2040306,2041704,2043123,2044010,2044830,2046025,2047093,2048162,2049250,2050238,2051330,2052370,2053386,2054380,2055241,2056405,2057224,2058024,2058831,2059594,2060359,2061201,2062012,2063326,2064827,2066055,2067408,2068978,2070445,2071776,2073064,2074343,2075634,2076909,2078175,2079348,2080431,2081727,2083037,2084313,2085638,2086760,2087875,2089187,2090312,2091335,2092515,2093753,2095134,2096266,2097209,2098365,2099571,2100813,2101966,2103225,2104440,2105465,2106668,2108114,2109456,2110821,2111600,2111625,2111650,2111760,2112951,2114322,2115344,2116362,2117403,2118638,2120161,2121702,2123380,2124969,2126496,2127711,2129259,2130815,2132418,2134035,2135366,2136691,2138231,2139713,2141098,2142694,2143793,2144966,2146313,2147613,2148855,2150048,2150867,2151975,2153076,2153902,2154844,2156072,2157192,2158321,2159641,2160992,2161596,2162698,2163722,2164928,2166240,2167908,2169137,2170577,2172169,2173736,2175205,2176706,2178112,2179504,2181225,2181635,21
82038,2182443,2182846,2183255,2183662,2184082,2184543,2184923,2185263,2185637,2186095,2187300,2188564,2189628,2190797,2191938,2193192,2194417,2195689,2196867,2198076,2199315,2200246,2201517,2202645,2203873,2205265,2206547,2207659,2208874,2210053,2211296,2212644,2213577,2214678,2215580,2216490,2217902,2219283,2220634,2221483,2222820,2224048,2225292,2226491,2227785,2228983,2230092,2231423,2232776,2234032,2235183,2236471,2237421,2238374,2239524,2240647,2241971,2243168,2244370,2245285,2246331,2247580,2248810,2249959,2251047,2252008,2253235,2254588,2255742,2256717,2257844,2258962,2259710,2260880,2262263,2263524,2264711,2265481,2266646,2267652,2268848,2269768,2270878,2271902,2272917,2273732,2274913,2276063,2277202,2278347,2279622,2280898,2282192,2283275,2284581,2285831,2287076,2288262,2289360,2290324,2291388,2292406,2293650,2294758,2296044,2297264,2298508,2299700,2300707,2301924,2303153,2304394,2305642,2306997,2308218,2309352,2310566,2311783,2313024,2314316,2315527,2316722,2318021,2319361,2320662,2321928,2323152,2324466,2325799,2326927,2327992,2329384,2330728,2331992,2333221,2334553,2335791,2336998,2338061,2339139,2340360,2341528,2342854,2344152,2345480,2346832,2348119,2349355,2350609,2351913,2353206,2354449,2355421,2356701,2358028,2358987,2360171,2361424,2362402,2363485,2364437,2365498,2366639,2367976,2369318,2370308,2371509,2372673,2373915,2375238,2376374,2377567,2378351,2379426,2380506,2381725,2382738,2383797,2384636,2385866,2387033,2388078,2389095,2390078,2391094,2392380,2393596,2394722,2395990,2397002,2398248,2399448,2400496,2401661,2402733,2403603,2404592,2405862,2406929,2408102,2409230,2410358,2411612,2412408,2413445,2414707,2415894,2417039,2418246,2419441,2420616,2421825,2423038,2424085,2424985,2426160,2427438,2428528,2429691,2430799,2431809,2432981,2434163,2435158,2436343,2437393,2438507,2439677,2440790,2441885,2442875,2443919,2445101,2446297,2447288,2448353,2449444,2450635,2451738,2452923,2454235,2455327,2456441,2457648,2458792,2459715,2461012,2462333,2463450,2464520,2465835,2467286,2468556,2469686,2471063,2472033,2473017,2474244,2474971,2476089,2477234,2478404,2479730,2480870,2482139,2483415,2484569,2485639,2486768,2488191,2489510,2490681,2491769,2492937,2494150,2495278,2496523,2497508,2498861,2500026,2501152,2502416,2503272,2504451,2505717,2506818,2507938,2509164,2510227,2511146,2512467,2513448,2514290,2515142,2516142,2517299,2518493,2519615,2520820,2522004,2523265,2524416,2525484,2526630,2527944,2529181,2530405,2531571,2532746,2533841,2534973,2536172,2537327,2538578,2539882,2541151,2542402,2543658,2545116,2546739,2548114,2549455,2550759,2551922,2553277,2554271,2555569,2556730,2557832,2558960,2560141,2561198,2562368,2563402,2564700,2565798,2566917,2567830,2568825,2569808,2570961,2572050,2573185,2574274,2575459,2576775,2577991,2579173,2580439,2581595,2582679,2583735,2584793,2586023,2587140,2588442,2589724,2590898,2592046,2593190,2594241,2595218,2596633,2597875,2599155,2600524,2601920,2603368,2604782,2606021,2607283,2608416,2609782,2610728,2611694,2612754,2613676,2614873,2616120,2617453,2618612,2619932,2621095,2622415,2623699,2624932,2626206,2627508,2628779,2629747,2631121,2632326,2633423,2634683,2635887,2637008,2638278,2639353,2640580,2641786,2642880,2643953,2645026,2646374,2647371,2648793,2650123,2651411,2652668,2653723,2654604,2655821,2657085,2658397,2659675,2660553,2661713,2662978,2664305,2665460,2666591,2667815,2669008,2670174,2671388,2672560,2673676,2674675,2675799,2677157,2678332,2679697,2681160,2682261,2683391,2684580,2685680,2686799,2688008,2689040,2689938,2690947,2692053,26931
09,2694374,2695434,2696210,2697431,2698484,2699681,2700842,2701894,2702953,2704108,2705406,2706871,2707900,2709144,2710493,2711602,2712969,2713752,2714943,2716008,2717296,2718604,2719667,2720970,2722122,2723363,2724493,2725662,2726697,2727830,2729139,2730628,2731730,2732709,2733890,2735313,2736466,2737542,2738500,2739464,2740704,2741974,2743298,2744272,2745303,2746376,2747726,2748963,2750166,2751420,2752619,2753926,2754964,2756265,2757583,2758758,2759885,2761061,2762317,2763461,2764719,2765926,2767344,2768581,2769743,2770876,2772163,2773314,2774359,2775438,2776414,2777407,2778468,2779422,2780633,2781850,2783149,2784422,2785715,2786782,2787776,2788882,2790040,2791284,2792613,2793763,2794865,2795766,2796786,2797887,2798984,2799677,2800499,2801242,2802080,2802917,2803987,2804446,2804951,2805512,2806237,2806672,2807186,2807648,2808527,2809917,2811404,2812781,2814160,2814965,2815548,2817069,2818529,2819992,2820954,2822005,2822978,2824411,2825863,2827193,2828666,2830224,2831562,2832789,2834270,2835629,2836869,2838379,2839516,2840785,2842330,2843625,2844929,2845752,2846585,2847863,2849206,2850496,2852047,2853479,2854507,2855722,2856732,2858290,2859754,2860808,2862268,2863273,2864826,2865575,2866811,2867825,2868673,2869480,2871131,2872679,2874128,2875310,2876783,2878384,2879977,2881694,2883102,2884208,2885289,2886473,2887262,2887892,2889122,2890129,2890608,2891679,2892881,2893629,2894280,2895503,2896500,2897302,2898036,2898886,2899882,2900908,2901535,2903121,2904143,2905211,2906384,2907178,2907756,2908971,2910013,2910486,2911554,2912742,2913530,2914144,2915374,2916392,2917164,2917928,2918775,2919995,2921165,2921926,2922890,2924162,2925735,2926656,2928111,2929678,2931316,2932507,2933563,2934825,2935545,2935570,2935595,2935620,2935645,2936773,2938251,2939784,2941439,2942662,2943742,2944858,2944883,2944908,2945307,2946576,2947890,2949012,2950320,2951546,2952865,2954225,2955697,2956677,2957675,2958988,2960315,2961505,2962981,2964311,2965693,2967037,2968417,2969546,2970989,2972223,2973477,2974686,2976006,2976985,2978439,2979718,2980890,2982259,2983600,2984808,2986026,2987340,2988718,2990040,2991315,2992505,2993922,2995113,2996417,2997630,2998722,3000082,3001425,3002465,3003759,3005101,3006297,3007362,3008211,3009290,3010485,3011406,3012554,3013677,3014538,3015728,3016671,3017781,3019030,3020117,3021245,3022204,3023382,3024564,3025664,3026352,3027115,3028317,3029553,3030733,3031864,3032826,3033855,3034763,3035876,3036871,3037726,3038834,3039939,3041089,3042306,3043609,3044893,3046128,3047483,3048846,3049998,3051042,3052170,3053546,3054755,3055937,3057067,3058190,3059326,3060545,3062034,3063193,3064332,3065558,3066962,3068301,3069615,3070939,3072156,3073535,3074837,3076115,3077278,3078583,3079814,3080958,3082129,3083319,3084554,3086028,3087305,3088364,3089809,3091102,3092265,3093632,3094502,3095525,3096668,3097555,3098736,3099885,3100990,3102198,3103402,3104208,3105190,3106383,3107642,3109037,3110269,3111474,3112609,3113688,3114860,3116089,3117261,3118594,3120020,3121465,3122922,3124170,3125365,3126473,3127793,3128982,3130221,3131255,3132579,3134037,3135277,3136418,3137516,3138595,3139686,3140315,3141430,3142428,3143498,3144405,3145549,3146862,3147975,3149120,3150333,3151482,3152805,3154091,3155355,3156754,3158294,3159415,3160572,3161785,3163013,3164122,3165457,3166679,3167855,3168906,3170021,3171166,3172316,3173311,3174542,3175718,3177049,3178084,3179061,3180441,3181704,3182773,3183432,3184822,3185911,3187198,3188380,3189618,3190904,3191825,3192636,3193669,3194737,3195775,3196511,3197643,3199035,3200192,
[minified data blob elided: a byte-offset array followed by a sizes:[...] chunk-size array — packaged-asset index metadata from a deleted file in this Space; no human-readable content]
499,1337,1176,1467,1752,1758,1195,516,437,473,437,362,1568,1355,1327,1180,1208,1581,1957,1905,1822,1778,1899,1672,1682,1160,1866,1556,1657,1800,1907,1708,630,1885,1945,1488,1172,1560,1415,1442,1543,1408,1670,1828,1785,738,1280,1918,1794,1798,1624,1429,800,828,940,572,916,644,571,622,598,672,941,730,791,764,710,689,782,731,786,619,934,1e3,995,1022,1116,1035,914,1970,1387,1221,1068,956,1259,1300,1564,1490,1177,1615,1674,1666,1458,1393,1407,1361,1159,1393,1554,1274,1570,1530,1641,1766,1394,1380,1160,1359,1056,713,1304,1014,1064,1418,1556,1593,1554,1531,1554,1587,1580,1825,2048,2048,2048,2048,1976,2011,2048,2048,2048,1986,2048,1872,1233,1241,1334,1185,1405,1576,1607,1984,2048,2048,2048,2048,2048,1579,1093,1194,1055,1063,1066,1053,1064,1054,1073,1137,1147,1178,1120,1125,1053,1042,1112,1119,1063,1156,1320,1190,753,1397,1565,1541,1796,1163,1879,1475,1960,1930,1966,1919,1853,1910,1858,1935,1939,1915,1652,1720,1643,1817,1714,1953,1933,1889,1335,1732,1949,1836,1750,1852,1745,1727,1796,1771,1857,1854,1961,1896,1875,1890,1846,1827,1653,1679,1755,1788,1830,1854,1838,1877,1923,1591,1898,1928,1983,2006,1877,1775,1841,1857,1268,1313,974,948,1275,1610,1729,1906,1964,1684,1490,1683,1501,1607,1873,1699,1196,1634,1559,1484,834,1813,1640,1266,1767,1853,1556,1502,1027,1482,1260,1359,1387,1456,1713,1779,1942,1968,2011,2018,1764,1975,1858,1905,1977,1606,1448,1266,1944,1840,1689,1922,1813,1604,1758,1634,1537,1763,1615,1792,1809,1887,1800,1757,1624,1384,1671,1606,1639,1806,1736,1829,1735,1631,1430,1819,2048,2057,2025,2053,2048,2010,1137,1233,1447,1553,2044,2048,2048,1508,1083,1200,1066,1160,1068,1175,1223,1108,1121,1075,1176,1611,1639,1483,913,1224,1255,1427,1614,1602,1450,1585,1288,1267,1545,1303,1637,1671,1564,1358,1659,1389,1843,1511,1382,1412,877,1695,1408,1629,1906,1962,1966,1840,1899,1799,1888,1873,1839,1825,1878,1892,1938,1916,1763,1743,1696,1736,1797,1592,1835,1930,1846,1616,1733,1831,1989,1797,1715,1741,1810,1688,1789,1836,1859,1860,1796,1894,1899,1933,1777,1746,1846,1837,1703,1679,1717,1711,1612,1558,1835,1844,1869,1936,1845,1830,1942,1741,1708,1829,1876,1851,1957,1962,1972,1849,1640,1416,1465,1544,1563,1681,1623,1691,1850,1839,1816,1841,1870,1836,1521,1452,1398,1239,1126,1279,1531,1647,1854,1754,1991,1839,1846,1962,1945,1978,1521,1363,1612,1631,1605,1634,1810,1793,1067,1442,1494,1621,1640,1128,1217,1551,1785,1599,1580,1433,1591,1317,1062,1494,1117,1261,1363,1358,1516,1320,1541,1456,1600,1881,1921,2029,1960,1684,1941,1913,1827,1974,1235,1657,645,1829,2006,1488,1497,1382,1554,1937,1758,1414,1545,1414,1416,1579,1552,1686,1711,1449,1304,1048,1396,1661,1788,1485,544,467,465,464,385,1217,1271,1345,1033,1288,1363,1781,1976,1758,1864,1121,1896,1646,1846,1763,1745,1976,1880,1963,1974,1057,1420,1630,1733,1856,1045,951,1892,1749,1773,1090,727,848,723,745,656,686,564,783,656,1008,861,819,674,645,866,703,781,592,684,852,1078,995,981,1068,1193,956,1321,1922,1290,1098,1012,952,1237,1545,1578,1372,1456,1569,1766,1454,1920,1930,1487,1042,1604,875,1074,982,1361,1185,1556,1596,1789,2048,2054,2048,2048,1931,2048,2048,2056,1993,2045,1134,1161,1393,1182,1326,1495,1621,1872,2048,2048,2048,2048,1978,1172,1079,1169,1051,1154,1049,1056,1063,1062,1162,1189,1129,1121,1085,1053,1085,1197,1104,1305,1389,1673,1528,1614,1734,1895,1889,1931,1905,1896,1471,1277,1366,1558,1401,1520,1509,1696,1262,1723,1619,1904,2048,2048,2048,1955,1824,1864,1816,1880,1820,1356,1250,1266,1232,1204,1231,1058,1385,1404,1142,1207,1312,1378,1734,1857,1710,1637,1629,1569,1791,1737,1665,1754,1594,1777,1766,1703,1795,1586,1820,1774,1616,1695,1697,1593,1725,1753,13
64,1306,1407,1697,1875,1925,1481,1182,1469,1521,1378,1487,1816,1622,1171,1469,1492,1571,1538,1539,1563,1643,1509,1426,1219,1332,1276,1303,1445,1518,1188,1431,1283,1556,1831,1846,1881,1820,1768,1906,1651,1869,1885,1861,1896,1823,1866,1870,1939,1607,1297,1348,1543,1318,1823,2048,1248,1053,1194,1054,1130,1295,1976,1708,1856,1790,1940,1964,1963,1898,1960,1905,1960,1891,1911,1975,1956,1942,1950,1951,1738,1486,1906,1798,1924,1883,1927,1805,1917,1837,1929,1898,1900,1945,1421,1282,1434,1443,1415,1452,1626,1284,1651,1192,926,1733,1202,1177,989,1748,1271,1823,1371,1275,1571,756,644,1776,1703,1485,1854,1490,1962,1967,1980,1953,1840,1906,1943,1940,1853,1971,1902,1800,1805,1707,1707,1759,1786,1961,1951,1956,1651,1721,1670,1996,1859,1726,1745,1873,1843,1676,1679,1836,1744,1814,1831,1992,1960,1934,1855,1819,1876,1920,1884,1740,1736,1736,1673,1670,1664,1785,1722,1799,1875,1633,1725,1803,1861,1751,1773,1498,1710,1740,1927,1887,1855,1798,1962,1985,1964,1979,1373,1482,1397,1437,1541,1611,1527,1543,1410,1868,1847,1602,1895,1825,1697,1393,1449,1285,1100,1176,1453,1719,1751,1779,2001,1738,1917,1961,1881,1995,1523,1464,1472,1602,1597,1494,1866,1836,1215,1602,1442,1657,1476,1380,1187,1577,1639,1469,1568,1498,1476,1390,1048,1484,1114,1282,1356,1320,1522,1392,1560,1330,1628,1804,1917,2003,2029,1612,1989,1917,1816,1947,1426,1610,631,1793,1946,1593,1401,1424,1615,1860,1819,1416,1484,1437,1473,1535,1523,1712,1712,1507,1580,957,1227,1625,1796,1636,807,490,461,460,400,795,1377,1365,1125,1098,1182,1580,1959,1938,1912,1710,1577,1283,1485,1869,1603,1586,1694,1929,1809,1951,1710,996,1532,1602,1220,1593,1528,1503,1702,1826,1097,815,1921,1723,1547,1845,828,821,824,686,824,550,681,602,753,627,1016,794,833,744,571,869,784,743,657,521,885,988,1034,996,1160,1111,937,1722,1711,1068,1082,973,1001,1393,1525,1445,1173,1709,1640,1699,1212,1193,1052,1265,1514,1422,1785,1617,1306,1243,1558,1025,1194,723,1341,1178,872,1608,1538,1518,1622,1830,2048,2048,2048,2048,2001,1986,2048,2048,2048,1985,2048,1875,1238,1241,1339,1196,1432,1567,1580,2011,2048,2048,2048,2048,2048,1454,1085,1195,1049,1055,1093,1053,1068,1070,1054,1147,1131,1152,1126,1107,1054,1058,1112,1130,1065,1187,1304,1111,756,1601,1574,1093,1279,1543,1213,1574,1867,1906,1964,1963,1883,1916,1922,1980,1945,1939,1938,1928,1888,1749,1583,1934,1953,1928,1884,1790,1918,1852,1733,1891,1845,1835,1831,1657,1695,1655,1800,1757,1959,1901,1698,1589,1867,1796,1728,1779,1854,1911,1803,1806,1893,1880,1884,1928,1873,1795,1852,1857,1714,1690,1861,1908,1874,1867,1812,1862,1801,1883,1867,1939,1868,1728,1849,1816,1549,1278,1084,909,1134,1411,1766,1743,1944,1963,1585,1427,1599,1507,1581,1840,1690,1333,1650,1588,1533,1088,1739,1619,1459,1592,1776,1793,1803,1244,1189,1434,1135,1514,1368,1477,1735,1925,1638,1905,1774,1740,1777,1838,1321,2027,2048,2048,2029,2048,1578,1155,1328,1441,1905,2048,2048,1384,1099,1177,1099,1075,1055,1190,1199,1100,1289,1583,1389,1950,1853,1696,1802,1628,1722,1802,1328,1739,1622,1680,1551,1370,1328,1722,1828,1883,1917,1793,1936,1554,1291,1329,1537,1290,1758,1049,1455,1408,1713,1790,1557,1294,1309,1528,1301,1536,1534,1597,1748,1927,2048,2049,1916,1808,1888,1836,1884,1852,1383,1219,1332,1126,1154,1239,1250,1063,1319,1428,1351,1361,1193,929,1480,1858,1798,1725,1688,1730,1718,1859,1716,1819,1785,1718,1716,1835,1542,1932,1705,1764,1777,1665,1710,1585,1692,1815,1452,1582,1744,1877,1900,1352,1318,1373,1408,1416,1459,1357,1754,1874,1884,1813,1837,1738,1707,1880,1903,1883,1893,1920,1332,1348,1509,1380,1479,2036,1217,1094,1166,1632,1996,1733,1883,1933,1953,1906,1922,1906,1974,1968,1930,1963,
1957,1931,1743,1511,1253,1812,1352,1859,1503,1404,1373,691,1102,1694,1392,1554,1966,1940,1979,1799,1851,1816,1896,1789,1735,1837,1877,1951,1925,1681,1742,1554,1771,1696,1789,1949,1923,1739,1721,1706,1968,1777,1697,1719,1880,1684,1779,1838,1811,1825,1845,1882,1851,1947,1835,1828,1862,1735,1685,1834,1735,1770,1677,1624,1505,1774,1809,1837,1843,1668,1903,1931,1853,1862,1996,1867,1960,1994,1955,1962,1567,1573,1423,1586,1585,1665,1602,1575,1889,1893,1833,1695,1933,1861,1804,1459,1547,1344,1114,1194,1407,1464,1861,1732,1970,1957,1647,1960,1980,1885,1976,1473,1364,1519,1567,1588,1603,1876,1771,1207,1417,1587,1555,1615,1208,1293,1645,1527,1570,1670,1246,1706,1078,1121,1404,1100,1315,1279,1447,1431,1432,1440,1451,1727,1878,1917,2036,1859,1759,1934,1912,1912,1971,1117,1646,749,1923,2018,1421,1530,1483,1586,1906,1740,1421,1548,1372,1376,1634,1581,1671,1780,1489,1405,977,1329,1620,1810,1558,651,454,453,443,376,1041,1292,1373,1118,1186,1173,1662,1951,1952,1931,1157,1740,1886,1742,1615,1635,1936,1881,1981,1960,1397,1229,1551,1692,1835,1529,715,1611,1797,1804,1764,1096,738,850,724,738,656,680,560,783,655,992,863,824,677,634,865,705,782,584,692,854,1058,994,981,1072,1190,944,1321,1930,1290,1101,1002,954,1241,1543,1578,1373,1447,1679,1799,1436,1931,1836,1537,1118,1425,1044,736,1398,1145,1305,1599,1528,1549,1595,1623,2041,2048,2048,2048,2038,1964,2048,2048,2048,1996,1814,1202,1268,1339,1194,1397,1582,1596,1986,2048,2048,2048,2048,1766,1118,1134,1140,1054,1140,1052,1069,1072,1121,1137,1197,1118,1121,1071,1053,1116,1114,1180,1320,1477,1570,1911,1275,1349,1671,540,528,1736,1867,1638,1937,1954,1874,1900,1865,1892,1887,1845,1899,1952,1805,1844,1717,1646,1706,1640,1880,1860,1916,1667,1653,1677,1920,1762,1705,1652,1784,1732,1653,1804,1771,1805,1884,1839,1973,1772,1873,1856,1584,1563,1702,1560,1592,1608,1749,1842,1546,1646,1911,1856,1866,1833,1861,1863,1896,1928,1781,1911,1737,1664,1917,1780,1786,1410,1382,1196,1186,1046,1350,1500,1801,1799,1980,1280,1594,1586,1508,1480,1761,1602,1349,1354,1473,1358,1115,1261,1360,1325,1524,1351,1618,1524,1848,1804,1787,1618,592,473,453,437,387,934,1322,1416,1483,1458,1558,1687,1925,1734,1748,1820,1816,1652,1619,1761,1877,1862,1871,1906,1949,1575,1514,1611,1893,1429,1730,1721,1091,1164,631,1664,1556,1559,1496,1622,2015,2048,2048,1959,2057,2056,1947,1154,1227,1429,1550,2038,2048,2048,1718,1099,1187,1065,1100,1066,1054,1168,1228,1115,1075,1054,1206,1704,1457,1135,1335,1518,1265,1532,1745,1939,1439,1475,1591,672,1377,1877,1656,1931,1935,1828,1895,1787,1972,1817,1583,1548,1710,1841,1915,1924,1838,1743,1728,1836,1975,1731,1656,1779,1911,1798,1783,1895,1824,1806,1877,1892,1863,1906,1669,1825,1722,1780,1859,1572,1665,1767,1627,1787,1864,1569,1762,1740,1915,1865,1890,1873,1909,1905,1874,1880,1929,1904,1811,1780,1838,1885,1961,1775,1769,1476,1443,1271,1068,1184,1479,1562,1761,1969,1486,1560,1513,1610,1436,1595,1749,1249,1285,1651,1123,1266,1158,1454,1163,1501,1431,1469,1755,1947,1495,548,432,449,435,385,1100,1354,1466,1394,1456,1491,1745,1967,1763,1752,1793,1966,1685,1777,1841,1948,1931,1916,1910,1922,1957,1375,1635,1373,1869,1602,1860,1942,1096,1233,568,1706,1606,1551,1590,1851,2048,2048,2048,1938,2048,2001,1395,1175,1349,1511,1919,2048,2048,2039,1182,1084,1170,1129,1051,1067,1144,1214,1152,1137,1053,1201,1406,1576,1447,1777,1114,1337,1515,1279,1529,1753,1852,1993,1959,1840,1959,1936,1941,1981,1913,1632,1623,1569,1604,1519,1565,1544,1547,1556,1477,1595,1434,1670,1403,1339,1384,1251,1268,1188,1196,1211,956,955,986,789,1440,1274,1298,1226,887,1162,1049,768,1313,1254,1177,1156,689,1250,1132,11
69,1301,870,1075,865,984,1300,1330,1289,1169,933,1128,1070,1024,862,812,1383,1186,1166,1280,850,1063,1029,927,728,1372,1195,1239,1176,865,1103,919,867,960,1100,1011,1128,1021,931,873,698,1335,1225,1304,1350,979,1035,1060,1012,969,704,810,1387,1192,968,1009,1277,1259,1222,1280,1105,909,1131,994,806,1105,1016,1074,1096,948,849,861,737,1362,1234,1196,1202,902,1137,1093,828,1124,1176,1178,1236,963,906,1042,930,814,1448,1213,1275,1126,917,1069,807,1120,1271,1198,1290,1048,910,1059,719,1298,1155,1212,1242,846,1041,914,702,1354,971,924,902,909,1014,1233,1047,1080,1120,913,865,811,1115,1201,1198,1242,1001,915,1057,1035,693,1274,1162,1217,1259,876,1061,921,748,1154,1143,1032,1228,1200,1157,1238,905,881,1033,658,1256,1219,1201,1267,877,1021,1006,765,1355,1179,1221,1289,870,1040,1008,951,740,1408,1202,1244,1208,863,1098,976,764,1174,1245,1216,1250,979,958,951,764,1379,1304,1312,1315,979,1059,990,1031,977,792,1019,1353,1207,1199,1118,867,1099,871,695,1366,1177,1179,1217,872,1140,1049,795,1186,1192,1220,1268,953,948,999,949,685,1345,1167,1204,1295,851,1070,1e3,829,1083,1303,1188,1320,840,976,1382,1293,1318,1163,933,1179,1054,750,1255,1027,1074,1135,928,847,812,979,1287,1337,1282,1081,919,1066,724,1314,903,1382,1073,1326,1135,1203,1227,889,952,1013,913,731,1373,1283,1304,1332,1006,1084,1041,1042,986,817,1026,1397,1135,1226,1166,880,1212,701,1300,1104,1196,1268,822,994,1011,898,717,1357,1162,1216,1121,868,1123,1074,692,1255,1095,1192,1251,867,1077,875,978,1246,1172,1253,1105,951,1155,1006,727,1343,1170,1197,1126,837,1006,828,930,1118,1033,1119,947,838,912,873,1269,1e3,1039,1112,799,906,879,1096,1339,1220,1284,1126,937,1006,986,940,674,631,687,687,727,755,653,693,660,648,727,698,678,619,591,774,773,625,668,713,696,723,727,650,1363,1174,1215,1225,844,967,967,983,688,619,656,647,673,750,680,701,636,750,637,738,615,631,707,803,668,643,719,715,699,703,783,650,800,704,1448,1219,1240,1208,913,974,1021,997,661,610,673,669,682,787,682,650,645,641,775,633,710,617,611,761,799,638,728,694,757,1384,1181,1213,1182,913,959,970,973,642,610,668,646,693,764,636,748,648,684,743,694,632,519,605,757,732,625,686,732,683,691,575,771,665,669,724,623,1157,1019,1090,1137,812,890,891,1038,1422,1158,1275,1095,920,960,965,961,655,612,666,652,721,749,670,688,648,767,617,715,602,628,745,787,633,655,777,720,696,662,784,674,761,909,1149,1017,1127,935,819,889,854,1393,1166,1210,1284,915,1006,971,1001,714,622,646,626,675,773,691,683,686,659,768,675,639,556,617,735,747,662,633,748,718,712,611,726,716,633,760,603,1029,1225,1139,849,1124,1513,1303,1347,1371,1190,1329,1173,1234,1271,909,974,958,999,729,618,648,661,682,733,731,670,695,624,718,697,724,620,618,626,804,782,644,801,688,703,679,956,1336,1157,1239,1097,930,1029,964,877,623,656,664,747,771,669,688,625,773,623,753,631,629,760,795,696,678,683,696,697,787,1609,1331,876,1122,355,381,564,694,715,847,939,853,1076,1019,990,726,910,984,977,923,852,906,662,591,486,327,393,597,1732,1985,2048,2048,2048,2048,2048,2048,2057,2048,2048,1904,1620,1172,1616,1637,1974,1688,1771,1595,1969,1517,1875,1146,1542,1265,745,1853,1875,1098,96,245,76,1300,1278,1500,1931,1638,1599,1663,1575,1668,1358,1422,1323,853,1367,1150,857,1487,1233,704,1562,1540,1642,950,1673,1642,1629,951,1594,1605,1506,1111,1538,1489,1142,1335,1487,1561,920,1697,1554,1621,1184,1279,1467,1445,1382,1689,2048,1894,1361,1034,1350,1304,1429,1177,1209,1163,1181,1447,1382,1254,1291,1104,874,1233,1113,1118,1273,1420,1759,2057,2048,2048,2048,2048,2048,1959,1897,1932,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2056,2048,2048,2048,2048,2048,20
57,2048,2048,2056,2048,2048,2048,2056,2053,2055,2048,2048,2048,2048,2056,2048,2045,1871,2048,2048,2048,2057,2057,2048,2048,2048,2048,2048,2057,2048,2048,2048,1829,2007,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2055,2048,2039,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1383,1953,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,882,1796,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1750,1367,1831,1763,1637,1011,1183,1293,1382,1341,1335,1340,1372,1336,1333,1332,1386,1377,1401,1331,1346,1343,1362,1366,1354,1341,1300,1073,1831,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2016,2048,2056,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1615,1451,1517,1510,1453,1485,1403,1448,1440,1435,1411,1559,1542,1581,1679,1557,1571,1640,1601,1726,1785,1773,1998,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2057,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,204
8,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,2048,1931,1395,1048,1150,1138,1098,1180,1127,1043,1096,1080,995,1182,1080,922,1026,1096,1116,1099,1220,1231,940,1164,1169,1097,1266,1223,1183,1077,1044,1019,1182,1055,925,627,781,1021,1155,1100,1135,784,972,1287,1115,1145,1484,1256,1182,1084,1034,986,1169,1006,1081,1241,1311,1355,1143,1130,917,1079,1074,1280,1234,1094,1230,1185,1203,1022,900,1272,1042,1018,1041,800,1064,904,1026,1219,666,971,987,1327,1089,1362,1276,1178,1080,1081,916,1003,1206,1e3,882,1102,1202,1001,958,998,958,759,950,972,956,923,913,1219,1477,1168,1002,1064,894,1059,913,1038,1118,998,834,1085,1050,1145,998,891,1034,1188,1350,978,908,978,986,1179,1152,911,1046,1156,1050,954,1232,1266,1149,980,1167,1258,1203,1267,1325,1415,1388,1218,1172,1134,1468,1273,1210,1126,983,1108,1141,1306,1024,1169,1196,1181,906,1167,1177,1172,984,1187,1109,1136,1283,1222,1186,1255,1230,1326,1286,1171,1136,1233,1147,1184,1189,1268,1005,1106,1149,1125,1228,1369,1297,1185,976,1210,1301,1260,1356,1224,1179,953,987,1402,841,967,1124,1325,1213,1057,1141,1140,1122,1091,1119,1029,1364,1185,662,655,760,300,675,789,696,707,614,611,804,758,755,743,594,606,673,710,562,532,569,643,803,651,574,341,408,415,474,591,628,584,462,464,444,418,352,347,351,349,445,417,422,430,422,268,356,477,327,454,358,404,355,306,224,226,219,225,219,235,235,222,245,236,232,231,231,234,228,230,228,244,228,226,225,225,222,223,225,224,240,225,222,223,231,228,233,229,224,220,245,233,228,235,228,235,230,237,227,235,229,229,228,227,227,227,229,224,400,445,371,396,366,449,357,317,667,568,477,424,942,475],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,0,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,0,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,0,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,0,0,0,1,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,0,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,0,0,0,1,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,0,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,1,0,0,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
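// Illustrative sketch, not part of the original file (assumption: Emscripten
// file_packager conventions). Each manifest entry {filename, start, end} below
// is a byte range into the single concatenated data payload; Module.LZ4.loadPackage()
// mounts that range at `filename` in the virtual filesystem, decompressing LZ4
// chunks on demand. The helper name `readPackagedFile` and the `payload` buffer
// are hypothetical, for illustration only:
//
//   function readPackagedFile(payload, entry) {
//     // e.g. {filename: ".../matplotlib/__init__.py", start: 659, end: 51401}
//     // selects bytes 659..51401 of the decompressed payload.
//     return payload.subarray(entry.start, entry.end);
//   }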
compressedData["data"] = byteArray;
// Fail fast if the app was not built with Emscripten's LZ4 filesystem support.
assert(typeof Module.LZ4 === "object", "LZ4 not present - was your app build with -s LZ4=1 ?");
// Hand the compressed payload plus its chunk metadata to the LZ4 FS loader.
Module.LZ4.loadPackage({metadata: metadata, compressedData: compressedData}, true);
// Tell the Emscripten runtime that this async dependency has finished loading.
Module["removeRunDependency"]("datafile_matplotlib.data");
}
Module["addRunDependency"]("datafile_matplotlib.data");
if (!Module.preloadResults) Module.preloadResults = {};
Module.preloadResults[PACKAGE_NAME] = {fromCache: false};
// Process the payload now if the fetch already completed, otherwise register a callback.
if (fetched) {
  processPackageData(fetched);
  fetched = null;
} else {
  fetchedCallback = processPackageData;
}
}
// Run the loader immediately if the runtime is already up, otherwise defer to preRun.
if (Module["calledRun"]) {
  runWithFS();
} else {
  if (!Module["preRun"]) Module["preRun"] = [];
  Module["preRun"].push(runWithFS);
}
};
loadPackage({files:[{filename:"/lib/python3.9/site-packages/pylab.py",start:0,end:90,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9-nspkg.pth",start:90,end:659,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/__init__.py",start:659,end:51401,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_animation_data.py",start:51401,end:59130,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_cm.py",start:59130,end:125696,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_cm_listed.py",start:125696,end:235158,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_color_data.py",start:235158,end:270105,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_constrained_layout.py",start:270105,end:297523,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_internal_utils.py",start:297523,end:299663,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_layoutbox.py",start:299663,end:323297,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_mathtext_data.py",start:323297,end:379720,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_pylab_helpers.py",start:379720,end:384220,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_text_layout.py",start:384220,end:385256,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/afm.py",start:385256,end:401833,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/animation.py",start:401833,end:469080,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/artist.py",start:469080,end:523080,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/axis.py",start:523080,end:615744,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backend_bases.py",start:615744,end:743040,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backend_m
anagers.py",start:743040,end:756899,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backend_tools.py",start:756899,end:791834,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/bezier.py",start:791834,end:811310,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/blocking_input.py",start:811310,end:822611,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/category.py",start:822611,end:829775,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/cm.py",start:829775,end:846518,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/collections.py",start:846518,end:923530,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/colorbar.py",start:923530,end:989296,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/colors.py",start:989296,end:1067592,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/container.py",start:1067592,end:1071897,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/contour.py",start:1071897,end:1140998,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/dates.py",start:1140998,end:1207899,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/docstring.py",start:1207899,end:1210335,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/dviread.py",start:1210335,end:1250567,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/figure.py",start:1250567,end:1356501,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/font_manager.py",start:1356501,end:1405160,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/fontconfig_pattern.py",start:1405160,end:1411811,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/gridspec.py",start:1411811,end:1444865,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/hatch.py",start:1444865,end:1451753,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/image.py",start:1451753,end:1518707,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/legend.py",start:1518707,end:1566324,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/legend_handler.py",start:1566324,end:1592535,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/lines.py",start:1592535,end:1644084,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/markers.py",start:1644084,end:1676072,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mathtext.py",start:1676072,end:1796299,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mlab.py",start:1796299,end:1831965,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/offsetbox.py",start:1831965,end:1891897,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/patches.py",start:1891897,end:2040802,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/path.py",start:2040802,end:2080245,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/patheffects.py",start:2080245,end:2093845,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/pylab.py",start:2093845,end:2095536,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/pyplot.py",start:2095536,end:2211245,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/quiver.py",start:2211245,end:2258529,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/rcsetup.py",start:2258529,end:2315785,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/sankey.py",start:2315785,end:2352123,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/scale.py",start:2352123,end:2375746,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/spines.py",start:237
5746,end:2395969,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/stackplot.py",start:2395969,end:2399877,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/streamplot.py",start:2399877,end:2422793,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/table.py",start:2422793,end:2449353,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/texmanager.py",start:2449353,end:2465221,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/text.py",start:2465221,end:2531313,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/textpath.py",start:2531313,end:2546295,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/ticker.py",start:2546295,end:2650880,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tight_bbox.py",start:2650880,end:2653815,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tight_layout.py",start:2653815,end:2666971,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/transforms.py",start:2666971,end:2763768,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/ttconv.py",start:2763768,end:2764007,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/type1font.py",start:2764007,end:2776337,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/units.py",start:2776337,end:2783612,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/widgets.py",start:2783612,end:2876822,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_version.py",start:2876822,end:2877293,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_contour.so",start:2877293,end:2933654,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/ft2font.so",start:2933654,end:3007158,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_image.so",start:3007158,end:3247899,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_path.so",start:3247899,end:3371357,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_qhull.so",start:3371357,end:3751826,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_tri.so",start:3751826,end:3810696,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/_ttconv.so",start:3810696,end:3860304,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/fontlist.json",start:3860304,end:3884945,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/axes/__init__.py",start:3884945,end:3884991,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/axes/_axes.py",start:3884991,end:4198054,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/axes/_base.py",start:4198054,end:4357096,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/axes/_secondary_axes.py",start:4357096,end:4370456,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/axes/_subplots.py",start:4370456,end:4380114,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/__init__.py",start:4380114,end:4380221,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/_backend_pdf_ps.py",start:4380221,end:4383995,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/_backend_tk.py",start:4383995,end:4416854,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_agg.py",start:4416854,end:4439860,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_cairo.py",start:4439860,end:4458540,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_gtk3.py",start:4458540,end:4491634,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_gtk3
agg.py",start:4491634,end:4494438,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_gtk3cairo.py",start:4494438,end:4495789,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_macosx.py",start:4495789,end:4501467,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_mixed.py",start:4501467,end:4506657,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_nbagg.py",start:4506657,end:4515208,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_pdf.py",start:4515208,end:4612222,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_pgf.py",start:4612222,end:4655667,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_ps.py",start:4655667,end:4702398,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_qt4.py",start:4702398,end:4702911,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_qt4agg.py",start:4702911,end:4703290,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_qt4cairo.py",start:4703290,end:4703606,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_qt5.py",start:4703606,end:4742832,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_qt5agg.py",start:4742832,end:4745992,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_qt5cairo.py",start:4745992,end:4747811,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_svg.py",start:4747811,end:4796972,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_template.py",start:4796972,end:4805414,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_tkagg.py",start:4805414,end:4806090,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_tkcairo.py",start:4806090,end:4807159,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_webagg.py",start:4807159,end:4818214,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_webagg_core.py",start:4818214,end:4836247,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_wx.py",start:4836247,end:4896821,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_wxagg.py",start:4896821,end:4899753,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/backend_wxcairo.py",start:4899753,end:4901586,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/qt_compat.py",start:4901586,end:4909655,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/browser_backend.py",start:4909655,end:4925886,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/wasm_backend.py",start:4925886,end:4929578,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/html5_canvas_backend.py",start:4929578,end:4945610,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/_backend_agg.so",start:4945610,end:5222975,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/backends/_tkagg.so",start:5222975,end:5242359,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/cbook/__init__.py",start:5242359,end:5319576,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/cbook/deprecation.py",start:5319576,end:5338488,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/compat/__init__.py",start:5338488,end:5338582,audio:0},{filename:"/lib/python
3.9/site-packages/matplotlib/projections/__init__.py",start:5338582,end:5340250,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/projections/geo.py",start:5340250,end:5357707,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/projections/polar.py",start:5357707,end:5411907,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/sphinxext/__init__.py",start:5411907,end:5411907,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/sphinxext/mathmpl.py",start:5411907,end:5415666,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/sphinxext/plot_directive.py",start:5415666,end:5441896,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/style/__init__.py",start:5441896,end:5441963,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/style/core.py",start:5441963,end:5450449,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/__init__.py",start:5450449,end:5451851,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/compare.py",start:5451851,end:5468832,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/decorators.py",start:5468832,end:5487558,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/disable_internet.py",start:5487558,end:5492469,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/exceptions.py",start:5492469,end:5492607,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/widgets.py",start:5492607,end:5494116,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/Duration.py",start:5494116,end:5498574,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/Epoch.py",start:5498574,end:5504924,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/EpochConverter.py",start:5504924,end:5508089,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/StrConverter.py",start:5508089,end:5511042,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/UnitDbl.py",start:5511042,end:5518653,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py",start:5518653,end:5521752,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py",start:5521752,end:5522433,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/testing/jpl_units/__init__.py",start:5522433,end:5525117,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/__init__.py",start:5525117,end:5525373,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/triangulation.py",start:5525373,end:5533699,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/tricontour.py",start:5533699,end:5545013,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/trifinder.py",start:5545013,end:5548481,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/triinterpolate.py",start:5548481,end:5612856,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/tripcolor.py",start:5612856,end:5617863,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/triplot.py",start:5617863,end:5620626,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/trirefine.py",start:5620626,end:5633848,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/tri/tritools.py",start:5633848,end:5644427,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/matplotlibrc",start:5644427,end:5684364,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mp
l-data/fonts/ttf/STIXSizOneSymBol.ttf",start:5684364,end:5696920,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/LICENSE_STIX",start:5696920,end:5702395,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmex10.ttf",start:5702395,end:5723487,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizTwoSymReg.ttf",start:5723487,end:5739191,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmsy10.ttf",start:5739191,end:5768587,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-BoldOblique.ttf",start:5768587,end:6021703,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizFiveSymReg.ttf",start:6021703,end:6035359,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansDisplay.ttf",start:6035359,end:6061071,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneralBolIta.ttf",start:6061071,end:6242223,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/LICENSE_DEJAVU",start:6242223,end:6247039,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmtt10.ttf",start:6247039,end:6275175,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Italic.ttf",start:6275175,end:6620787,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizFourSymBol.ttf",start:6620787,end:6633015,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXNonUniIta.ttf",start:6633015,end:6679767,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneral.ttf",start:6679767,end:7127995,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono.ttf",start:7127995,end:7468235,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif.ttf",start:7468235,end:7847975,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans.ttf",start:7847975,end:8604047,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Bold.ttf",start:8604047,end:8935583,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerifDisplay.ttf",start:8935583,end:8949883,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizFourSymReg.ttf",start:8949883,end:8965855,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-Oblique.ttf",start:8965855,end:9599695,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXNonUniBol.ttf",start:9599695,end:9630207,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneralBol.ttf",start:9630207,end:9867567,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmmi10.ttf",start:9867567,end:9900127,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXNonUniBolIta.ttf",start:9900127,end:9941399,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-Bold.ttf",start:9941399,end:10645527,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizTwoSymBol.ttf",start:10645527,end:10657643,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmr10.ttf",start:10657643,end:106839
91,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Oblique.ttf",start:10683991,end:10935463,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXNonUni.ttf",start:10935463,end:10994571,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizOneSymReg.ttf",start:10994571,end:11014331,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXGeneralItalic.ttf",start:11014331,end:11189371,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmb10.ttf",start:11189371,end:11215051,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans-BoldOblique.ttf",start:11215051,end:11856771,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Bold.ttf",start:11856771,end:12212463,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizThreeSymBol.ttf",start:12212463,end:12224655,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSerif-BoldItalic.ttf",start:12224655,end:12571719,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/STIXSizThreeSymReg.ttf",start:12571719,end:12587555,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/cmss10.ttf",start:12587555,end:12607931,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/ttf/Humor-Sans.ttf",start:12607931,end:12633763,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/psyr.afm",start:12633763,end:12643407,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pbkd8a.afm",start:12643407,end:12658564,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pagk8a.afm",start:12658564,end:12675805,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/cmr10.afm",start:12675805,end:12685906,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pbkli8a.afm",start:12685906,end:12701087,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/putb8a.afm",start:12701087,end:12722619,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/ptmb8a.afm",start:12722619,end:12740602,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvro8an.afm",start:12740602,end:12758479,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pcrbo8a.afm",start:12758479,end:12773901,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/putr8a.afm",start:12773901,end:12796049,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pzdr.afm",start:12796049,end:12805516,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/ptmri8a.afm",start:12805516,end:12823584,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pcrr8a.afm",start:12823584,end:12838923,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pncbi8a.afm",start:12838923,end:12856419,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvb8a.afm",start:12856419,end:12873574,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pplb8a.afm",start:12873574,end:12889236,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pplbi8a.afm",start:12889236,en
d:12905046,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pncb8a.afm",start:12905046,end:12921074,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/cmex10.afm",start:12921074,end:12933144,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pcrro8a.afm",start:12933144,end:12948587,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/ptmr8a.afm",start:12948587,end:12966529,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pncr8a.afm",start:12966529,end:12983194,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/cmtt10.afm",start:12983194,end:12989695,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvlo8a.afm",start:12989695,end:13005424,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pzcmi8a.afm",start:13005424,end:13021674,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/ptmbi8a.afm",start:13021674,end:13039744,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvbo8an.afm",start:13039744,end:13056939,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pplri8a.afm",start:13056939,end:13072672,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/putri8a.afm",start:13072672,end:13094563,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pncri8a.afm",start:13094563,end:13111483,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pagd8a.afm",start:13111483,end:13128666,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvro8a.afm",start:13128666,end:13146585,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvbo8a.afm",start:13146585,end:13163815,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/cmmi10.afm",start:13163815,end:13174231,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pagko8a.afm",start:13174231,end:13191577,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pcrb8a.afm",start:13191577,end:13206929,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pplr8a.afm",start:13206929,end:13222681,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/cmsy10.afm",start:13222681,end:13230976,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvr8a.afm",start:13230976,end:13248815,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/putbi8a.afm",start:13248815,end:13270746,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pbkdi8a.afm",start:13270746,end:13286024,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvr8an.afm",start:13286024,end:13303805,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvb8an.afm",start:13303805,end:13320891,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pbkl8a.afm",start:13320891,end:13335891,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/pagdo8a.afm",start:13335891,end:13353146,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/afm/phvl8a.afm",start:13353146,end:13368773,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/p
dfcorefonts/Courier-Oblique.afm",start:13368773,end:13384214,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Courier.afm",start:13384214,end:13399549,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Times-Italic.afm",start:13399549,end:13465877,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica-Bold.afm",start:13465877,end:13535146,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Times-BoldItalic.afm",start:13535146,end:13594788,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica.afm",start:13594788,end:13669080,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Courier-BoldOblique.afm",start:13669080,end:13684479,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica-BoldOblique.afm",start:13684479,end:13753844,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Courier-Bold.afm",start:13753844,end:13769177,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica-Oblique.afm",start:13769177,end:13843569,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/ZapfDingbats.afm",start:13843569,end:13853096,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/readme.txt",start:13853096,end:13853924,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Symbol.afm",start:13853924,end:13863664,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Times-Bold.afm",start:13863664,end:13927915,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/fonts/pdfcorefonts/Times-Roman.afm",start:13927915,end:13988375,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/subplots.gif",start:13988375,end:13989066,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back-symbolic.svg",start:13989066,end:13990578,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/hand.svg",start:13990578,end:13995466,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/matplotlib_128.ppm",start:13995466,end:14044633,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move.pdf",start:14044633,end:14046500,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home_large.png",start:14046500,end:14047290,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/matplotlib.pdf",start:14047290,end:14070142,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/hand.gif",start:14070142,end:14071409,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back.png",start:14071409,end:14071789,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/subplots-symbolic.svg",start:14071789,end:14073919,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/subplots.pdf",start:14073919,end:14075633,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect_large.gif",start:14075633,end:14077089,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back_large.gif",start:14077089,end:14077888,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/
mpl-data/images/subplots.png",start:14077888,end:14078333,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/matplotlib.png",start:14078333,end:14079616,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help.pdf",start:14079616,end:14081429,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move_large.gif",start:14081429,end:14082380,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help.gif",start:14082380,end:14082944,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/qt4_editor_options_large.png",start:14082944,end:14083563,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move.gif",start:14083563,end:14084242,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back.svg",start:14084242,end:14085754,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/hand_large.gif",start:14085754,end:14086727,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward_large.gif",start:14086727,end:14087513,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward.png",start:14087513,end:14087870,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home-symbolic.svg",start:14087870,end:14089761,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/subplots_large.png",start:14089761,end:14090423,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help.png",start:14090423,end:14090895,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave_large.png",start:14090895,end:14091615,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move-symbolic.svg",start:14091615,end:14094124,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/qt4_editor_options.pdf",start:14094124,end:14095692,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help-symbolic.svg",start:14095692,end:14097562,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward.gif",start:14097562,end:14098152,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help.ppm",start:14098152,end:14099893,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/qt4_editor_options.png",start:14099893,end:14100273,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave.gif",start:14100273,end:14100996,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home.png",start:14100996,end:14101464,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward.svg",start:14101464,end:14102995,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect_large.png",start:14102995,end:14104011,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back_large.png",start:14104011,end:14104631,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/subplots_large.gif",start:14104631,end:14105981,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help_large.ppm",start:14105981,end:14112906,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back.gif",start:14112906,end:14113514,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/subplots.svg",start:14113514,end:14115644,audio:0},{filena
me:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move.svg",start:14115644,end:14118153,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/hand.png",start:14118153,end:14119132,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect.png",start:14119132,end:14119662,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave.png",start:14119662,end:14120120,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward.pdf",start:14120120,end:14121750,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move_large.png",start:14121750,end:14122517,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help.svg",start:14122517,end:14124387,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/help_large.png",start:14124387,end:14125134,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home_large.gif",start:14125134,end:14126556,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home.gif",start:14126556,end:14127222,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave_large.gif",start:14127222,end:14128720,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect.gif",start:14128720,end:14129416,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/qt4_editor_options.svg",start:14129416,end:14130660,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect.pdf",start:14130660,end:14132269,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/hand.pdf",start:14132269,end:14136441,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/matplotlib.svg",start:14136441,end:14198528,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave-symbolic.svg",start:14198528,end:14200557,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home.pdf",start:14200557,end:14202294,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/back.pdf",start:14202294,end:14203917,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/home.svg",start:14203917,end:14205808,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave.svg",start:14205808,end:14207837,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/move.png",start:14207837,end:14208318,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/matplotlib_large.png",start:14208318,end:14211406,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward_large.png",start:14211406,end:14211999,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/filesave.pdf",start:14211999,end:14213733,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect-symbolic.svg",start:14213733,end:14215212,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/zoom_to_rect.svg",start:14215212,end:14216691,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/images/forward-symbolic.svg",start:14216691,end:14218222,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-deep.mplstyle",start:14218222,end:14218364,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-notebo
ok.mplstyle",start:14218364,end:14218746,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-ticks.mplstyle",start:14218746,end:14219411,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/fivethirtyeight.mplstyle",start:14219411,end:14220243,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/tableau-colorblind10.mplstyle",start:14220243,end:14220433,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-poster.mplstyle",start:14220433,end:14220836,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/fast.mplstyle",start:14220836,end:14221124,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-bright.mplstyle",start:14221124,end:14221268,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/bmh.mplstyle",start:14221268,end:14221980,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-whitegrid.mplstyle",start:14221980,end:14222644,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/Solarize_Light2.mplstyle",start:14222644,end:14223899,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-dark.mplstyle",start:14223899,end:14224566,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-colorblind.mplstyle",start:14224566,end:14224714,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/grayscale.mplstyle",start:14224714,end:14225240,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-white.mplstyle",start:14225240,end:14225905,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/dark_background.mplstyle",start:14225905,end:14226563,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/ggplot.mplstyle",start:14226563,end:14227520,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-muted.mplstyle",start:14227520,end:14227663,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn.mplstyle",start:14227663,end:14228793,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/_classic_test_patch.mplstyle",start:14228793,end:14228960,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-darkgrid.mplstyle",start:14228960,end:14229630,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-talk.mplstyle",start:14229630,end:14230033,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/classic.mplstyle",start:14230033,end:14254261,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-pastel.mplstyle",start:14254261,end:14254405,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-paper.mplstyle",start:14254405,end:14254798,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/stylelib/seaborn-dark-palette.mplstyle",start:14254798,end:14254940,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/Minduka_Present_Blue_Pack.png",start:14254940,end:14268574,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/None_vs_nearest-pdf.png",start:14268574,end:14374802,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/ada.png",start:14374802,end:14683115,audio
:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/logo2.png",start:14683115,end:14716656,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/jacksboro_fault_dem.npz",start:14716656,end:14890717,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/demodata.csv",start:14890717,end:14891376,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/grace_hopper.jpg",start:14891376,end:14952682,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/embedding_in_wx3.xrc",start:14952682,end:14954868,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/README.txt",start:14954868,end:14954996,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/s1045.ima.gz",start:14954996,end:14988225,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/msft.csv",start:14988225,end:14991436,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/percent_bachelors_degrees_women_usa.csv",start:14991436,end:14997117,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/membrane.dat",start:14997117,end:15045117,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/goog.npz",start:15045117,end:15067962,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/data_x_x2_x3.csv",start:15067962,end:15068094,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/ct.raw.gz",start:15068094,end:15324253,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/aapl.npz",start:15324253,end:15431756,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/topobathy.npz",start:15431756,end:15476980,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/eeg.dat",start:15476980,end:15502580,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/grace_hopper.png",start:15502580,end:16130860,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib/mpl-data/sample_data/axes_grid/bivariate_normal.npy",start:16130860,end:16132740,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/__init__.py",start:16132740,end:16132862,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/__init__.py",start:16132862,end:16133399,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/anchored_artists.py",start:16133399,end:16133690,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/angle_helper.py",start:16133690,end:16133741,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axes_divider.py",start:16133741,end:16133919,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axes_grid.py",start:16133919,end:16134008,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axes_rgb.py",start:16134008,end:16134055,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axes_size.py",start:16134055,end:16134103,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axis_artist.py",start:16134103,end:16134153,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axisline_style.py",start:16134153,end:16134206,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/axislines.py",start:16134206,end:16134254,audio:0},{filename:"/lib/python3.
9/site-packages/mpl_toolkits/axes_grid/clip_path.py",start:16134254,end:16134302,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/colorbar.py",start:16134302,end:16134473,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/floating_axes.py",start:16134473,end:16134525,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/grid_finder.py",start:16134525,end:16134575,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/grid_helper_curvelinear.py",start:16134575,end:16134637,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/inset_locator.py",start:16134637,end:16134853,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid/parasite_axes.py",start:16134853,end:16135291,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/__init__.py",start:16135291,end:16135495,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/anchored_artists.py",start:16135495,end:16155661,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/axes_divider.py",start:16155661,end:16181465,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/axes_grid.py",start:16181465,end:16205067,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py",start:16205067,end:16210235,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/axes_size.py",start:16210235,end:16217785,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/colorbar.py",start:16217785,end:16245699,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/inset_locator.py",start:16245699,end:16268811,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/mpl_axes.py",start:16268811,end:16273191,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axes_grid1/parasite_axes.py",start:16273191,end:16287590,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/__init__.py",start:16287590,end:16288305,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/angle_helper.py",start:16288305,end:16301518,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/axes_divider.py",start:16301518,end:16301645,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/axes_grid.py",start:16301645,end:16301992,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/axes_rgb.py",start:16301992,end:16302175,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/axis_artist.py",start:16302175,end:16344241,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/axisline_style.py",start:16344241,end:16349280,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/axislines.py",start:16349280,end:16369059,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/clip_path.py",start:16369059,end:16372836,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/floating_axes.py",start:16372836,end:16385697,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/grid_finder.py",start:16385697,end:16396664,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py",start:16396664,end:16410922,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/axisartist/parasite_axes.py",start:16410922,end:16411337,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/mplot3d
/__init__.py",start:16411337,end:16411364,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/mplot3d/art3d.py",start:16411364,end:16440608,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/mplot3d/axes3d.py",start:16440608,end:16544146,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/mplot3d/axis3d.py",start:16544146,end:16562957,audio:0},{filename:"/lib/python3.9/site-packages/mpl_toolkits/mplot3d/proj3d.py",start:16562957,end:16567223,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9.egg-info/PKG-INFO",start:16567223,end:16572658,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9.egg-info/SOURCES.txt",start:16572658,end:16822998,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9.egg-info/dependency_links.txt",start:16822998,end:16822999,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9.egg-info/namespace_packages.txt",start:16822999,end:16823012,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9.egg-info/requires.txt",start:16823012,end:16823131,audio:0},{filename:"/lib/python3.9/site-packages/matplotlib-3.3.3-py3.9.egg-info/top_level.txt",start:16823131,end:16823162,audio:0}],remote_package_size:11513770,package_uuid:"b01118ca-484f-4ecf-81bd-6b88513544cc"})})(); \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Ilenia Cad4.md b/spaces/quidiaMuxgu/Expedit-SAM/Ilenia Cad4.md deleted file mode 100644 index ab7ebf76995f7c6b7f090c66f20e27d408961780..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Ilenia Cad4.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Ilenia Cad4


      DOWNLOAD 🌟 https://geags.com/2uCr6o



      -
-Responsibilities: dowel boring of parts on a hirzt F8 CNC dowel-boring machine; the machine's working program is ilenia CAD4. Requirements: experience working on a dowel-boring ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/rabiyulfahim/Prompt-Refinery-Text-to-Image-Generation/app.py b/spaces/rabiyulfahim/Prompt-Refinery-Text-to-Image-Generation/app.py deleted file mode 100644 index 446a01edbaef42dceb78abaf5db0fd0b8c2a06fd..0000000000000000000000000000000000000000 --- a/spaces/rabiyulfahim/Prompt-Refinery-Text-to-Image-Generation/app.py +++ /dev/null @@ -1,114 +0,0 @@ -import gradio as gr -import os -from share_btn import community_icon_html, loading_icon_html, share_js - -text_gen = gr.Interface.load(name="spaces/Gustavosta/MagicPrompt-Stable-Diffusion") -stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5") - -def get_images(prompt): - - gallery_dir = stable_diffusion(prompt, fn_index=2) - sd_output = [os.path.join(gallery_dir, image) for image in os.listdir(gallery_dir)] - return sd_output, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -def get_prompts(prompt_text): - return text_gen(prompt_text) - -css = ''' -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} -a {text-decoration-line: underline;} -''' - -with gr.Blocks(css=css) as demo: - gr.HTML("""
      -
      -

      - Prompt Refinery -

      -
      -

      - 🏭 Prompt Refinery generates variations of your prompt using MagicPrompt and Stable Diffusion -

      -
      """) - - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Input text prompt", - lines=2, elem_id="input-text") - with gr.Row(): - see_prompts = gr.Button("✍️Expand my prompts") - - with gr.Column(): - text_output = gr.Textbox( - label="🏭 Expanded text prompts", - lines=8, - elem_id="translated" - ) - with gr.Row(): - diffuse_btn = gr.Button(value="🏭 Render Images for My Prompts") - with gr.Column(elem_id="generated-gallery"): - sd_output = gr.Gallery().style(grid=2, height="auto") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - with gr.Column(elem_id="generatedAI"): - TextAI = gr.Textbox( - label="🏭 Expanded AI text prompts", - lines=4, - elem_id="translated1" - ) - TextAI2 = gr.Textbox( - label="🏭 Expanded AI text prompts", - lines=4, - elem_id="translated2" - ) - - see_prompts.click(get_prompts, - inputs = [input_text], - outputs = [ - text_output - ], api_name="TextAI") - diffuse_btn.click(get_images, - inputs = [ - text_output - ], - outputs = [sd_output, community_icon, loading_icon, share_button], api_name="TextAI2") - share_button.click(None, [], [], _js=share_js) - - - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/radames/candle-segment-anything-wasm/index.html b/spaces/radames/candle-segment-anything-wasm/index.html deleted file mode 100644 index 7ded52082afe3ab6fd2b7378e851342b93e8e99a..0000000000000000000000000000000000000000 --- a/spaces/radames/candle-segment-anything-wasm/index.html +++ /dev/null @@ -1,574 +0,0 @@ - - - - Candle Segment Anything Model (SAM) Rust/WASM - - - - - - - - - - - - - - -
      - 🕯️ -
      -

      Candle Segment Anything

      -

      Rust/WASM Demo

      -

- Zero-shot image segmentation with - Segment Anything Model (SAM) - and - MobileSAM. It runs in the browser with a WASM runtime built with - Candle - -

      -
      -
      - - -
      -
      -

      - Note: - The model's first run may take a few seconds as it loads and caches - the model in the browser, and then creates the image embeddings. Any - subsequent clicks on points will be significantly faster. -

      -
      -
      -
      -
      - -
      -
      - - - - -
      -
      -
      -
      - - - -
      - -
      - -
      - - -
      -
      - - - -
      -
      -
      -
      -

      Examples:

      - - - - -
      -
      -
      - - diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Avatar telugu movie full hd download Watch the epic sci-fi adventure online.md b/spaces/raedeXanto/academic-chatgpt-beta/Avatar telugu movie full hd download Watch the epic sci-fi adventure online.md deleted file mode 100644 index 17b93948a67adb95e711fbd2d871842f3feb00b9..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Avatar telugu movie full hd download Watch the epic sci-fi adventure online.md +++ /dev/null @@ -1,122 +0,0 @@ -
      -

      Avatar Telugu Movie Full HD Download

      -

      Are you a fan of sci-fi movies? Do you love watching epic adventures in a stunning alien world? If yes, then you must have heard of Avatar, one of the most successful and acclaimed movies of all time. But did you know that you can also watch Avatar in Telugu, one of the most widely spoken languages in India? In this article, we will tell you everything you need to know about Avatar Telugu movie full HD download options, risks and precautions.

      -

      Avatar telugu movie full hd download


      DOWNLOADhttps://tinourl.com/2uKZ78



      -

      Introduction

      -

      What is Avatar?

      -

      Avatar is a 2009 American epic science fiction film directed by James Cameron, who also wrote and produced it. The film is set in the year 2154, when humans are mining a precious mineral called unobtanium on Pandora, a lush moon of a gas giant in the Alpha Centauri star system. The expansion of the mining colony threatens the existence of a local tribe of Na'vi, a humanoid species that live in harmony with nature and worship a mother goddess called Eywa.

      -

      The film follows the story of Jake Sully, a former Marine who becomes part of the Avatar Program, a project that allows human operators to control genetically engineered Na'vi bodies using a mental link. Jake is assigned to infiltrate the Na'vi tribe and persuade them to relocate, but he soon falls in love with Neytiri, the daughter of the tribe's leader, and becomes torn between his loyalty to his own species and his respect for the Na'vi culture.

      -

      Why is Avatar popular?

      -

      Avatar is not only a captivating story of love, betrayal and redemption, but also a groundbreaking cinematic achievement that revolutionized the use of computer-generated imagery (CGI) and 3D technology. The film features stunning visuals of Pandora's exotic flora and fauna, breathtaking action sequences and realistic motion-capture performances by the actors. The film also explores themes such as environmentalism, colonialism, spirituality and identity.

      -

      Avatar was released in December 2009 and received critical acclaim and widespread popularity. It became the highest-grossing film of all time until 2019, when it was surpassed by Avengers: Endgame. It also won three Academy Awards for Best Cinematography, Best Visual Effects and Best Art Direction. The film has spawned a franchise that includes video games, comics, novels and theme park attractions. A sequel, Avatar 2, is scheduled to be released in December 2022.

      -

      How to watch Avatar in Telugu?

      -

      If you are a fan of Telugu cinema or want to enjoy Avatar in your native language, you might be wondering how to watch Avatar in Telugu. The good news is that there are several options available for you to download or stream Avatar Telugu movie full HD online. However, before you proceed, you should be aware of the risks and precautions involved in downloading or streaming movies from unauthorized sources. We will discuss them later in this article.

      -

      -

      Avatar Telugu Movie Full HD Download Options

      -

      Hotstar

      -

      One of the easiest and safest ways to watch Avatar in Telugu is to use Hotstar, a popular streaming service owned by Disney. Hotstar offers a wide range of movies and shows in various languages, including Telugu. You can watch Avatar on Hotstar with a premium subscription that costs ₹299 per month or ₹1499 per year. You can also download the movie on your device for offline viewing.

      -

      To watch Avatar on Hotstar, follow these steps:

      -
        -
1. Go to https://www.hotstar.com/in or download the Hotstar app on your device.
2. Sign up or log in with your account.
3. Search for "Avatar" in the search bar.
4. Select the movie from the results and click on "Watch Now".
5. Select "Telugu" from the audio options.
6. Enjoy watching Avatar in Telugu.
      -

      Apple TV, Google Play Movies, YouTube

      -

      Another option to watch Avatar in Telugu is to buy or rent it from online platforms such as Apple TV, Google Play Movies or YouTube. These platforms offer high-quality video and audio options for various devices. You can buy Avatar for ₹490 or rent it for ₹120 on these platforms. However, you should note that renting means that you can only watch the movie for a limited period of time after starting it.

      -

      To buy or rent Avatar from these platforms, follow these steps:

      -
        -
1. Go to https://www.apple.com/in/tv/, https://play.google.com/store/movies or https://www.youtube.com/movies, depending on your preference.
2. Search for "Avatar" in the search bar.
3. Select the movie from the results and click on "Buy" or "Rent".
4. Select "Telugu" from the audio options.
5. Complete the payment process.
6. Enjoy watching Avatar in Telugu.
      -

      Torrent Sites

      -

      A third option to watch Avatar in Telugu is to use torrent sites such as The Pirate Bay, Kickass Torrents or 1337x. These sites allow users to share files using peer-to-peer (P2P) networks. You can find various versions of Avatar Telugu movie full HD download on these sites, such as 720p BRrip or 1080p BluRay. However, you should be very careful when using torrent sites as they are illegal and risky.

      -

      To download Avatar from torrent sites, follow these steps:

      -
        -
1. Go to any torrent site of your choice using a VPN service or proxy server.
2. Search for "Avatar Telugu" in the search bar.
3. Select the file that matches your preferences and click on "Download".
4. Open the file with a torrent client such as BitTorrent or uTorrent.
5. Wait for the download to complete.
6. Enjoy watching Avatar in Telugu.
      -

      Risks and Precautions of Downloading Avatar Telugu Movie Full HD

      -

      Legal Issues

      -

      The first risk that you should be aware of when downloading or streaming movies from unauthorized sources is legal issues. Downloading or streaming copyrighted content without permission is illegal and can result in fines or imprisonment. You can also face lawsuits from the owners or distributors of the content. Therefore, you should always respect the intellectual property rights of the creators and use only legal sources to watch movies online.

      -

      Malware and Viruses

      -

      The second risk that you should be aware of when downloading or streaming movies from unauthorized sources is malware and viruses. Malware and viruses are malicious software that can harm your device or steal your personal information. They can be hidden in fake links, pop-ups or ads that appear on these sources. They can also be embedded in corrupted files that you download from these sources. Therefore, you should always use antivirus software and avoid clicking on suspicious links or ads when using these sources.

      -

      Quality and Subtitles

      -

      The third risk that you should be aware of when downloading or streaming movies from unauthorized sources is quality and subtitles. Quality refers to the resolution, clarity and sound quality of the video file that you download or stream. Subtitles refer to the text that appears on screen to translate or explain what is being said or shown on screen. Quality and subtitles can vary depending on the source that you use. Some sources may offer low-quality video files that are blurry or pixelated. Some sources may offer inaccurate or incomplete subtitles that ruin your viewing experience. Therefore, you should always check the quality and subtitles before downloading or streaming any movie online.

      -

      Conclusion

In this article, we have told you everything you need to know about Avatar Telugu movie full HD download options such as Hotstar, Apple TV, Google Play Movies, YouTube and torrent sites. We have also discussed the risks and precautions involved in downloading or streaming movies from unauthorized sources: legal issues, malware and viruses, and quality and subtitles. We hope that this article has helped you to find the best way to watch Avatar in Telugu and enjoy this amazing movie in your native language.

      -

      FAQs

      -

      Here are some frequently asked questions about Avatar Telugu movie full HD download.

      -
        -
1. Q: Is Avatar available on Netflix or Amazon Prime Video?
A: No, Avatar is not available on Netflix or Amazon Prime Video. However, you can watch it on Hotstar or buy or rent it from Apple TV, Google Play Movies or YouTube.
2. Q: Is Avatar 2 going to be released in Telugu?
A: Yes, Avatar 2 is going to be released in Telugu along with other languages such as English, Hindi, Tamil and Malayalam. The release date is December 16, 2022.
3. Q: How many sequels are planned for Avatar?
A: There are four sequels planned for Avatar. They are Avatar 2, Avatar 3, Avatar 4 and Avatar 5. They are expected to be released in 2022, 2024, 2026 and 2028 respectively.
4. Q: Who are the main actors in Avatar?
A: The main actors in Avatar are Sam Worthington as Jake Sully, Zoe Saldana as Neytiri, Stephen Lang as Colonel Miles Quaritch, Michelle Rodriguez as Trudy Chacon, Sigourney Weaver as Dr. Grace Augustine and Giovanni Ribisi as Parker Selfridge.
5. Q: What is the meaning of the word "Avatar"?
A: The word "Avatar" has multiple meanings. In Hinduism, it means the incarnation of a deity in a human or animal form. In computing, it means a graphical representation of a user or a character in a virtual environment. In the film, it means a genetically engineered Na'vi body that can be controlled by a human operator using a mental link.
      -

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/rajesh1729/NLP-with-mercury-spacy/app.py b/spaces/rajesh1729/NLP-with-mercury-spacy/app.py deleted file mode 100644 index 8dedab13a4d6779e1146a9782658a7d5d38e1cd8..0000000000000000000000000000000000000000 --- a/spaces/rajesh1729/NLP-with-mercury-spacy/app.py +++ /dev/null @@ -1,9 +0,0 @@ -import os -from dotenv import load_dotenv -from subprocess import Popen -load_dotenv() - - -command = ["mercury", "run", f"0.0.0.0:{os.environ.get('PORT', 7860)}"] -worker = Popen(command) -worker.wait() \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Chrome Crypter V2 FUD.epub.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Chrome Crypter V2 FUD.epub.md deleted file mode 100644 index 501d744655f468d75925ecc8f649130a71d7eae1..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Chrome Crypter V2 FUD.epub.md +++ /dev/null @@ -1,10 +0,0 @@ - -

In the list below, we see a call to the ReadProcessMemory function with a specific address. The dropper is only deployed for the 64-bit version of Chrome, and it asks the server to provide a privilege escalation function (ldrissystemreservedprivilege) for a specific executable (chrome.exe).
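As a rough illustration only (a sketch, not code recovered from the sample), this is roughly what such a ReadProcessMemory call looks like when reproduced from Python via ctypes; the process ID and address are placeholders:

```python
import ctypes

kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
PROCESS_VM_READ = 0x0010

def read_process_memory(pid: int, address: int, size: int) -> bytes:
    # Open the target process with read-only access to its memory.
    handle = kernel32.OpenProcess(PROCESS_VM_READ, False, pid)
    if not handle:
        raise ctypes.WinError(ctypes.get_last_error())
    buf = ctypes.create_string_buffer(size)
    n_read = ctypes.c_size_t(0)
    ok = kernel32.ReadProcessMemory(
        handle, ctypes.c_void_p(address), buf, size, ctypes.byref(n_read))
    kernel32.CloseHandle(handle)
    if not ok:
        raise ctypes.WinError(ctypes.get_last_error())
    return buf.raw[:n_read.value]

# Placeholder PID and address, for illustration only:
# data = read_process_memory(pid=1234, address=0x7FF600000000, size=64)
```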

      -

      Chrome Crypter V2 FUD.epub


      DOWNLOAD ——— https://urlgoal.com/2uCMDh



      -

The malware's payload is a custom 32-bit PE binary that is packed using a well-known PE packing algorithm (UPX). The program sends its payload to the chrome.exe process and receives an encrypted payload (by default, over HTTP) from the server.
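One quick way to corroborate the UPX packing is to inspect the section names of a dumped copy of the binary. A minimal sketch with the open-source pefile library follows (the file name is a placeholder; this is generic tooling, not the analysis pipeline used here):

```python
import pefile  # pip install pefile

pe = pefile.PE("payload_dump.bin")  # placeholder path to the dumped payload
section_names = [s.Name.rstrip(b"\x00").decode("ascii", "replace") for s in pe.sections]
print(section_names)

# UPX-packed binaries typically carry sections named UPX0/UPX1,
# and `upx -d payload_dump.bin` will usually restore the original image.
if any(name.startswith("UPX") for name in section_names):
    print("Likely UPX-packed")
```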

      -

It is important to note that the dropper is not downloading a file from the remote server. Instead, it sends a tiny HTTP request to the remote server. All it needs to do to perform the actual payload delivery is use the shellcode already available in the dropper's memory. If this shellcode is not available, it proceeds to download the shellcode from the server, or, if it is not able to reach the server, it simply launches the (malicious) file. This way, the malware avoids leaving any traces of its presence, and it does not use the uploadtempfile API.

      -

The only way to know how the malware's shellcode is created is by analyzing it. In order to do that, we need to run the executable in a VM and extract the shellcode section from the PE file. Our malware analysis tool xploit provides us with this capability:
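For readers without access to xploit, the same section-extraction step can be sketched with pefile; the input path is a placeholder for a local dump of the unpacked executable:

```python
import pefile

pe = pefile.PE("unpacked.bin")  # placeholder path to the unpacked executable
for section in pe.sections:
    name = section.Name.rstrip(b"\x00").decode("ascii", "replace")
    # Dump each section's raw bytes to its own file so the
    # shellcode-carrying section can be examined in a disassembler.
    with open(f"section_{name.lstrip('.')}.bin", "wb") as f:
        f.write(section.get_data())
    print(f"{name}: VA=0x{section.VirtualAddress:x}, {section.SizeOfRawData} raw bytes")
```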

      -

      -


      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/DriverSHARPAR5618SforWindowsXP32bitfree ((HOT)).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/DriverSHARPAR5618SforWindowsXP32bitfree ((HOT)).md deleted file mode 100644 index e5399f9a575aaaf62a7661fa88de72a30332f66f..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/DriverSHARPAR5618SforWindowsXP32bitfree ((HOT)).md +++ /dev/null @@ -1,6 +0,0 @@ -

      DriverSHARPAR5618SforWindowsXP32bitfree


      Download Filehttps://urlgoal.com/2uCKMt



      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HACK Acelogix Ace Utilities 5.3.0 With Keygen - Lz0.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HACK Acelogix Ace Utilities 5.3.0 With Keygen - Lz0.md deleted file mode 100644 index 1e5efc3305aeebfa8fcece1969b056768c7bb350..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HACK Acelogix Ace Utilities 5.3.0 With Keygen - Lz0.md +++ /dev/null @@ -1,7 +0,0 @@ -

      HACK Acelogix Ace Utilities 5.3.0 With Keygen - Lz0


      Download Zip 🗸 https://urlgoal.com/2uCLHh



      - -acelogix ace utilities 5.3.0 with keygen - lz0 acelogix ace 5.3.0 keygen - lz0 acelogix ace 5.3.0 cracked - cjwl acelogix ace 5.3.0 cracked - xwk acelogix ace 5.3.0 keygen - pv8 acelogix ace 5.3.0 keygen - nwqc acelogix ace 5.3.0 keygen - cjwl acelogix ace 5.3.0 cracked - xwk acelogix ace 5.3.0 cracked - nwqc -acelogix ace 5.3.0 keygen - tjjg acelogix ace 5.3.0 keygen - jqwx acelogix ace 5.3.0 keygen - xwk acelogix ace 5.3.0 keygen - pv8 acelogix ace 5.3.0 keygen - nwq 8a78ff9644
      -
      -
      -

      diff --git a/spaces/renatotn7/teste2/gfpgan/archs/gfpganv1_clean_arch.py b/spaces/renatotn7/teste2/gfpgan/archs/gfpganv1_clean_arch.py deleted file mode 100644 index eb2e15d288bf0ad641034ed58d5dab37b0baabb3..0000000000000000000000000000000000000000 --- a/spaces/renatotn7/teste2/gfpgan/archs/gfpganv1_clean_arch.py +++ /dev/null @@ -1,324 +0,0 @@ -import math -import random -import torch -from basicsr.utils.registry import ARCH_REGISTRY -from torch import nn -from torch.nn import functional as F - -from .stylegan2_clean_arch import StyleGAN2GeneratorClean - - -class StyleGAN2GeneratorCSFT(StyleGAN2GeneratorClean): - """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). - - It is the clean version without custom compiled CUDA extensions used in StyleGAN2. - - Args: - out_size (int): The spatial size of outputs. - num_style_feat (int): Channel number of style features. Default: 512. - num_mlp (int): Layer number of MLP style layers. Default: 8. - channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. - narrow (float): The narrow ratio for channels. Default: 1. - sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. - """ - - def __init__(self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, narrow=1, sft_half=False): - super(StyleGAN2GeneratorCSFT, self).__init__( - out_size, - num_style_feat=num_style_feat, - num_mlp=num_mlp, - channel_multiplier=channel_multiplier, - narrow=narrow) - self.sft_half = sft_half - - def forward(self, - styles, - conditions, - input_is_latent=False, - noise=None, - randomize_noise=True, - truncation=1, - truncation_latent=None, - inject_index=None, - return_latents=False): - """Forward function for StyleGAN2GeneratorCSFT. - - Args: - styles (list[Tensor]): Sample codes of styles. - conditions (list[Tensor]): SFT conditions to generators. - input_is_latent (bool): Whether input is latent style. Default: False. - noise (Tensor | None): Input noise or None. Default: None. - randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. - truncation (float): The truncation ratio. Default: 1. - truncation_latent (Tensor | None): The truncation latent tensor. Default: None. - inject_index (int | None): The injection index for mixing noise. Default: None. - return_latents (bool): Whether to return style latents. Default: False. 
- """ - # style codes -> latents with Style MLP layer - if not input_is_latent: - styles = [self.style_mlp(s) for s in styles] - # noises - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers # for each style conv layer - else: # use the stored noise - noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] - # style truncation - if truncation < 1: - style_truncation = [] - for style in styles: - style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) - styles = style_truncation - # get style latents with injection - if len(styles) == 1: - inject_index = self.num_latent - - if styles[0].ndim < 3: - # repeat latent code for all the layers - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: # used for encoder with different latent code for each layer - latent = styles[0] - elif len(styles) == 2: # mixing noises - if inject_index is None: - inject_index = random.randint(1, self.num_latent - 1) - latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) - latent = torch.cat([latent1, latent2], 1) - - # main generation - out = self.constant_input(latent.shape[0]) - out = self.style_conv1(out, latent[:, 0], noise=noise[0]) - skip = self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], - noise[2::2], self.to_rgbs): - out = conv1(out, latent[:, i], noise=noise1) - - # the conditions may have fewer levels - if i < len(conditions): - # SFT part to combine the conditions - if self.sft_half: # only apply SFT to half of the channels - out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) - out_sft = out_sft * conditions[i - 1] + conditions[i] - out = torch.cat([out_same, out_sft], dim=1) - else: # apply SFT to all the channels - out = out * conditions[i - 1] + conditions[i] - - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space - i += 2 - - image = skip - - if return_latents: - return image, latent - else: - return image, None - - -class ResBlock(nn.Module): - """Residual block with bilinear upsampling/downsampling. - - Args: - in_channels (int): Channel number of the input. - out_channels (int): Channel number of the output. - mode (str): Upsampling/downsampling mode. Options: down | up. Default: down. - """ - - def __init__(self, in_channels, out_channels, mode='down'): - super(ResBlock, self).__init__() - - self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1) - self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1) - self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False) - if mode == 'down': - self.scale_factor = 0.5 - elif mode == 'up': - self.scale_factor = 2 - - def forward(self, x): - out = F.leaky_relu_(self.conv1(x), negative_slope=0.2) - # upsample/downsample - out = F.interpolate(out, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) - out = F.leaky_relu_(self.conv2(out), negative_slope=0.2) - # skip - x = F.interpolate(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=False) - skip = self.skip(x) - out = out + skip - return out - - -@ARCH_REGISTRY.register() -class GFPGANv1Clean(nn.Module): - """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT. - - It is the clean version without custom compiled CUDA extensions used in StyleGAN2. 
- - Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. - - Args: - out_size (int): The spatial size of outputs. - num_style_feat (int): Channel number of style features. Default: 512. - channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. - decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None. - fix_decoder (bool): Whether to fix the decoder. Default: True. - - num_mlp (int): Layer number of MLP style layers. Default: 8. - input_is_latent (bool): Whether input is latent style. Default: False. - different_w (bool): Whether to use different latent w for different layers. Default: False. - narrow (float): The narrow ratio for channels. Default: 1. - sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. - """ - - def __init__( - self, - out_size, - num_style_feat=512, - channel_multiplier=1, - decoder_load_path=None, - fix_decoder=True, - # for stylegan decoder - num_mlp=8, - input_is_latent=False, - different_w=False, - narrow=1, - sft_half=False): - - super(GFPGANv1Clean, self).__init__() - self.input_is_latent = input_is_latent - self.different_w = different_w - self.num_style_feat = num_style_feat - - unet_narrow = narrow * 0.5 # by default, use a half of input channels - channels = { - '4': int(512 * unet_narrow), - '8': int(512 * unet_narrow), - '16': int(512 * unet_narrow), - '32': int(512 * unet_narrow), - '64': int(256 * channel_multiplier * unet_narrow), - '128': int(128 * channel_multiplier * unet_narrow), - '256': int(64 * channel_multiplier * unet_narrow), - '512': int(32 * channel_multiplier * unet_narrow), - '1024': int(16 * channel_multiplier * unet_narrow) - } - - self.log_size = int(math.log(out_size, 2)) - first_out_size = 2**(int(math.log(out_size, 2))) - - self.conv_body_first = nn.Conv2d(3, channels[f'{first_out_size}'], 1) - - # downsample - in_channels = channels[f'{first_out_size}'] - self.conv_body_down = nn.ModuleList() - for i in range(self.log_size, 2, -1): - out_channels = channels[f'{2**(i - 1)}'] - self.conv_body_down.append(ResBlock(in_channels, out_channels, mode='down')) - in_channels = out_channels - - self.final_conv = nn.Conv2d(in_channels, channels['4'], 3, 1, 1) - - # upsample - in_channels = channels['4'] - self.conv_body_up = nn.ModuleList() - for i in range(3, self.log_size + 1): - out_channels = channels[f'{2**i}'] - self.conv_body_up.append(ResBlock(in_channels, out_channels, mode='up')) - in_channels = out_channels - - # to RGB - self.toRGB = nn.ModuleList() - for i in range(3, self.log_size + 1): - self.toRGB.append(nn.Conv2d(channels[f'{2**i}'], 3, 1)) - - if different_w: - linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat - else: - linear_out_channel = num_style_feat - - self.final_linear = nn.Linear(channels['4'] * 4 * 4, linear_out_channel) - - # the decoder: stylegan2 generator with SFT modulations - self.stylegan_decoder = StyleGAN2GeneratorCSFT( - out_size=out_size, - num_style_feat=num_style_feat, - num_mlp=num_mlp, - channel_multiplier=channel_multiplier, - narrow=narrow, - sft_half=sft_half) - - # load pre-trained stylegan2 model if necessary - if decoder_load_path: - self.stylegan_decoder.load_state_dict( - torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema']) - # fix decoder without updating params - if fix_decoder: - for _, param in self.stylegan_decoder.named_parameters(): - param.requires_grad = False - - # for SFT 
modulations (scale and shift) - self.condition_scale = nn.ModuleList() - self.condition_shift = nn.ModuleList() - for i in range(3, self.log_size + 1): - out_channels = channels[f'{2**i}'] - if sft_half: - sft_out_channels = out_channels - else: - sft_out_channels = out_channels * 2 - self.condition_scale.append( - nn.Sequential( - nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True), - nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1))) - self.condition_shift.append( - nn.Sequential( - nn.Conv2d(out_channels, out_channels, 3, 1, 1), nn.LeakyReLU(0.2, True), - nn.Conv2d(out_channels, sft_out_channels, 3, 1, 1))) - - def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True): - """Forward function for GFPGANv1Clean. - - Args: - x (Tensor): Input images. - return_latents (bool): Whether to return style latents. Default: False. - return_rgb (bool): Whether return intermediate rgb images. Default: True. - randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. - """ - conditions = [] - unet_skips = [] - out_rgbs = [] - - # encoder - feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2) - for i in range(self.log_size - 2): - feat = self.conv_body_down[i](feat) - unet_skips.insert(0, feat) - feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2) - - # style code - style_code = self.final_linear(feat.view(feat.size(0), -1)) - if self.different_w: - style_code = style_code.view(style_code.size(0), -1, self.num_style_feat) - - # decode - for i in range(self.log_size - 2): - # add unet skip - feat = feat + unet_skips[i] - # ResUpLayer - feat = self.conv_body_up[i](feat) - # generate scale and shift for SFT layers - scale = self.condition_scale[i](feat) - conditions.append(scale.clone()) - shift = self.condition_shift[i](feat) - conditions.append(shift.clone()) - # generate rgb images - if return_rgb: - out_rgbs.append(self.toRGB[i](feat)) - - # decoder - image, _ = self.stylegan_decoder([style_code], - conditions, - return_latents=return_latents, - input_is_latent=self.input_is_latent, - randomize_noise=randomize_noise) - - return image, out_rgbs diff --git a/spaces/riccorl/relik-entity-linking/relik/common/log.py b/spaces/riccorl/relik-entity-linking/relik/common/log.py deleted file mode 100644 index b91e1fa7bfc22b759e0da4d69563315e31ce0e60..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/common/log.py +++ /dev/null @@ -1,97 +0,0 @@ -import logging -import sys -import threading -from typing import Optional - -from rich import get_console - -_lock = threading.Lock() -_default_handler: Optional[logging.Handler] = None - -_default_log_level = logging.WARNING - -# fancy logger -_console = get_console() - - -def _get_library_name() -> str: - return __name__.split(".")[0] - - -def _get_library_root_logger() -> logging.Logger: - return logging.getLogger(_get_library_name()) - - -def _configure_library_root_logger() -> None: - global _default_handler - - with _lock: - if _default_handler: - # This library has already configured the library root logger. - return - _default_handler = logging.StreamHandler() # Set sys.stderr as stream. - _default_handler.flush = sys.stderr.flush - - # Apply our default configuration to the library root logger. 
- library_root_logger = _get_library_root_logger() - library_root_logger.addHandler(_default_handler) - library_root_logger.setLevel(_default_log_level) - library_root_logger.propagate = False - - -def _reset_library_root_logger() -> None: - global _default_handler - - with _lock: - if not _default_handler: - return - - library_root_logger = _get_library_root_logger() - library_root_logger.removeHandler(_default_handler) - library_root_logger.setLevel(logging.NOTSET) - _default_handler = None - - -def set_log_level(level: int, logger: logging.Logger = None) -> None: - """ - Set the log level. - Args: - level (:obj:`int`): - Logging level. - logger (:obj:`logging.Logger`): - Logger to set the log level. - """ - if not logger: - _configure_library_root_logger() - logger = _get_library_root_logger() - logger.setLevel(level) - - -def get_logger( - name: Optional[str] = None, - level: Optional[int] = None, - formatter: Optional[str] = None, -) -> logging.Logger: - """ - Return a logger with the specified name. - """ - - if name is None: - name = _get_library_name() - - _configure_library_root_logger() - - if level is not None: - set_log_level(level) - - if formatter is None: - formatter = logging.Formatter( - "%(asctime)s - %(levelname)s - %(name)s - %(message)s" - ) - _default_handler.setFormatter(formatter) - - return logging.getLogger(name) - - -def get_console_logger(): - return _console diff --git a/spaces/riccorl/relik-entity-linking/relik/retriever/indexers/faiss.py b/spaces/riccorl/relik-entity-linking/relik/retriever/indexers/faiss.py deleted file mode 100644 index 153da317a99996ce9d80f2b212fa5eefdc5cb351..0000000000000000000000000000000000000000 --- a/spaces/riccorl/relik-entity-linking/relik/retriever/indexers/faiss.py +++ /dev/null @@ -1,422 +0,0 @@ -import contextlib -import logging -import math -import os -from dataclasses import dataclass -from typing import Callable, List, Optional, Union - -import numpy -import psutil -import torch -from relik.retriever.pytorch_modules import RetrievedSample -from torch.utils.data import DataLoader -from tqdm import tqdm - -from relik.common.log import get_logger -from relik.common.utils import is_package_available -from relik.retriever.common.model_inputs import ModelInputs -from relik.retriever.data.base.datasets import BaseDataset -from relik.retriever.data.labels import Labels -from relik.retriever.indexers.base import BaseDocumentIndex -from relik.retriever.pytorch_modules import PRECISION_MAP -from relik.retriever.pytorch_modules.model import GoldenRetriever - -if is_package_available("faiss"): - import faiss - import faiss.contrib.torch_utils - -logger = get_logger(__name__, level=logging.INFO) - - -@dataclass -class FaissOutput: - indices: Union[torch.Tensor, numpy.ndarray] - distances: Union[torch.Tensor, numpy.ndarray] - - -class FaissDocumentIndex(BaseDocumentIndex): - DOCUMENTS_FILE_NAME = "documents.json" - EMBEDDINGS_FILE_NAME = "embeddings.pt" - INDEX_FILE_NAME = "index.faiss" - - def __init__( - self, - documents: Union[List[str], Labels], - embeddings: Optional[Union[torch.Tensor, numpy.ndarray]] = None, - index=None, - index_type: str = "Flat", - nprobe: int = 1, - metric: int = faiss.METRIC_INNER_PRODUCT, - normalize: bool = False, - device: str = "cpu", - name_or_dir: Optional[Union[str, os.PathLike]] = None, - *args, - **kwargs, - ) -> None: - super().__init__(documents, embeddings, name_or_dir) - - if embeddings is not None and documents is not None: - logger.info("Both documents and embeddings are provided.") - if 
documents.get_label_size() != embeddings.shape[0]: - raise ValueError( - "The number of documents and embeddings must be the same." - ) - - faiss.omp_set_num_threads(psutil.cpu_count(logical=False)) - - # device to store the embeddings - self.device = device - - # params - self.index_type = index_type - self.metric = metric - self.normalize = normalize - - if index is not None: - self.embeddings = index - if self.device == "cuda": - # use a single GPU - faiss_resource = faiss.StandardGpuResources() - self.embeddings = faiss.index_cpu_to_gpu( - faiss_resource, 0, self.embeddings - ) - else: - if embeddings is not None: - # build the faiss index - logger.info("Building the index from the embeddings.") - self.embeddings = self._build_faiss_index( - embeddings=embeddings, - index_type=index_type, - nprobe=nprobe, - normalize=normalize, - metric=metric, - ) - - def _build_faiss_index( - self, - embeddings: Optional[Union[torch.Tensor, numpy.ndarray]], - index_type: str, - nprobe: int, - normalize: bool, - metric: int, - ): - # build the faiss index - self.normalize = ( - normalize - and metric == faiss.METRIC_INNER_PRODUCT - and not isinstance(embeddings, torch.Tensor) - ) - if self.normalize: - index_type = f"L2norm,{index_type}" - faiss_vector_size = embeddings.shape[1] - # if self.device == "cpu": - # index_type = index_type.replace("x,", "x_HNSW32,") - # nlist = math.ceil(math.sqrt(faiss_vector_size)) * 4 - # # nlist = 8 - # index_type = index_type.replace( - # "x", str(nlist) - # ) - # print("Current nlist:", nlist) - print("Current index:", index_type) - self.embeddings = faiss.index_factory(faiss_vector_size, index_type, metric) - - # convert to GPU - if self.device == "cuda": - # use a single GPU - faiss_resource = faiss.StandardGpuResources() - self.embeddings = faiss.index_cpu_to_gpu(faiss_resource, 0, self.embeddings) - else: - # move to CPU if embeddings is a torch.Tensor - embeddings = ( - embeddings.cpu() if isinstance(embeddings, torch.Tensor) else embeddings - ) - - self.embeddings.hnsw.efConstruction = 20 - # convert to float32 if embeddings is a torch.Tensor and is float16 - if isinstance(embeddings, torch.Tensor) and embeddings.dtype == torch.float16: - embeddings = embeddings.float() - - logger.info("Training the index.") - self.embeddings.train(embeddings) - - logger.info("Adding the embeddings to the index.") - self.embeddings.add(embeddings) - - self.embeddings.nprobe = nprobe - - # self.embeddings.hnsw.efSearch - self.embeddings.hnsw.efSearch = 256 - - # self.embeddings.k_factor = 10 - - # save parameters for saving/loading - self.index_type = index_type - self.metric = metric - - # clear the embeddings to free up memory - embeddings = None - - return self.embeddings - - @torch.no_grad() - @torch.inference_mode() - def index( - self, - retriever: GoldenRetriever, - documents: Optional[List[str]] = None, - batch_size: int = 32, - num_workers: int = 4, - max_length: Optional[int] = None, - collate_fn: Optional[Callable] = None, - encoder_precision: Optional[Union[str, int]] = None, - compute_on_cpu: bool = False, - force_reindex: bool = False, - *args, - **kwargs, - ) -> "FaissDocumentIndex": - """ - Index the documents using the encoder. - - Args: - retriever (:obj:`torch.nn.Module`): - The encoder to be used for indexing. - documents (:obj:`List[str]`, `optional`, defaults to None): - The documents to be indexed. - batch_size (:obj:`int`, `optional`, defaults to 32): - The batch size to be used for indexing. 
- num_workers (:obj:`int`, `optional`, defaults to 4): - The number of workers to be used for indexing. - max_length (:obj:`int`, `optional`, defaults to None): - The maximum length of the input to the encoder. - collate_fn (:obj:`Callable`, `optional`, defaults to None): - The collate function to be used for batching. - encoder_precision (:obj:`Union[str, int]`, `optional`, defaults to None): - The precision to be used for the encoder. - compute_on_cpu (:obj:`bool`, `optional`, defaults to False): - Whether to compute the embeddings on CPU. - force_reindex (:obj:`bool`, `optional`, defaults to False): - Whether to force reindexing. - - Returns: - :obj:`InMemoryIndexer`: The indexer object. - """ - - if self.embeddings is not None and not force_reindex: - logger.log( - "Embeddings are already present and `force_reindex` is `False`. Skipping indexing." - ) - if documents is None: - return self - - # release the memory - if collate_fn is None: - tokenizer = retriever.passage_tokenizer - - def collate_fn(x): - return ModelInputs( - tokenizer( - x, - padding=True, - return_tensors="pt", - truncation=True, - max_length=max_length or tokenizer.model_max_length, - ) - ) - - if force_reindex: - if documents is not None: - self.documents.add_labels(documents) - data = [k for k in self.documents.get_labels()] - - else: - if documents is not None: - data = [k for k in Labels(documents).get_labels()] - else: - return self - - dataloader = DataLoader( - BaseDataset(name="passage", data=data), - batch_size=batch_size, - shuffle=False, - num_workers=num_workers, - pin_memory=False, - collate_fn=collate_fn, - ) - - encoder = retriever.passage_encoder - - # Create empty lists to store the passage embeddings and passage index - passage_embeddings: List[torch.Tensor] = [] - - encoder_device = "cpu" if compute_on_cpu else self.device - - # fucking autocast only wants pure strings like 'cpu' or 'cuda' - # we need to convert the model device to that - device_type_for_autocast = str(encoder_device).split(":")[0] - # autocast doesn't work with CPU and stuff different from bfloat16 - autocast_pssg_mngr = ( - contextlib.nullcontext() - if device_type_for_autocast == "cpu" - else ( - torch.autocast( - device_type=device_type_for_autocast, - dtype=PRECISION_MAP[encoder_precision], - ) - ) - ) - with autocast_pssg_mngr: - # Iterate through each batch in the dataloader - for batch in tqdm(dataloader, desc="Indexing"): - # Move the batch to the device - batch: ModelInputs = batch.to(encoder_device) - # Compute the passage embeddings - passage_outs = encoder(**batch) - # Append the passage embeddings to the list - if self.device == "cpu": - passage_embeddings.extend([c.detach().cpu() for c in passage_outs]) - else: - passage_embeddings.extend([c for c in passage_outs]) - - # move the passage embeddings to the CPU if not already done - passage_embeddings = [c.detach().cpu() for c in passage_embeddings] - # stack it - passage_embeddings: torch.Tensor = torch.stack(passage_embeddings, dim=0) - # convert to float32 for faiss - passage_embeddings.to(PRECISION_MAP["float32"]) - - # index the embeddings - self.embeddings = self._build_faiss_index( - embeddings=passage_embeddings, - index_type=self.index_type, - normalize=self.normalize, - metric=self.metric, - ) - # free up memory from the unused variable - del passage_embeddings - - return self - - @torch.no_grad() - @torch.inference_mode() - def search(self, query: torch.Tensor, k: int = 1) -> list[list[RetrievedSample]]: - - k = min(k, self.embeddings.ntotal) - - if 
self.normalize: - faiss.normalize_L2(query) - if isinstance(query, torch.Tensor) and self.device == "cpu": - query = query.detach().cpu() - # Retrieve the indices of the top k passage embeddings - retriever_out = self.embeddings.search(query, k) - - # get int values (second element of the tuple) - batch_top_k: List[List[int]] = retriever_out[1].detach().cpu().tolist() - # get float values (first element of the tuple) - batch_scores: List[List[float]] = retriever_out[0].detach().cpu().tolist() - # Retrieve the passages corresponding to the indices - batch_passages = [ - [self.documents.get_label_from_index(i) for i in indices if i != -1] - for indices in batch_top_k - ] - # build the output object - batch_retrieved_samples = [ - [ - RetrievedSample(label=passage, index=index, score=score) - for passage, index, score in zip(passages, indices, scores) - ] - for passages, indices, scores in zip( - batch_passages, batch_top_k, batch_scores - ) - ] - return batch_retrieved_samples - - # def save(self, saving_dir: Union[str, os.PathLike]): - # """ - # Save the indexer to the disk. - - # Args: - # saving_dir (:obj:`Union[str, os.PathLike]`): - # The directory where the indexer will be saved. - # """ - # saving_dir = Path(saving_dir) - # # save the passage embeddings - # index_path = saving_dir / self.INDEX_FILE_NAME - # logger.info(f"Saving passage embeddings to {index_path}") - # faiss.write_index(self.embeddings, str(index_path)) - # # save the passage index - # documents_path = saving_dir / self.DOCUMENTS_FILE_NAME - # logger.info(f"Saving passage index to {documents_path}") - # self.documents.save(documents_path) - - # @classmethod - # def load( - # cls, - # loading_dir: Union[str, os.PathLike], - # device: str = "cpu", - # document_file_name: Optional[str] = None, - # embedding_file_name: Optional[str] = None, - # index_file_name: Optional[str] = None, - # **kwargs, - # ) -> "FaissDocumentIndex": - # loading_dir = Path(loading_dir) - - # document_file_name = document_file_name or cls.DOCUMENTS_FILE_NAME - # embedding_file_name = embedding_file_name or cls.EMBEDDINGS_FILE_NAME - # index_file_name = index_file_name or cls.INDEX_FILE_NAME - - # # load the documents - # documents_path = loading_dir / document_file_name - - # if not documents_path.exists(): - # raise ValueError(f"Document file `{documents_path}` does not exist.") - # logger.info(f"Loading documents from {documents_path}") - # documents = Labels.from_file(documents_path) - - # index = None - # embeddings = None - # # try to load the index directly - # index_path = loading_dir / index_file_name - # if not index_path.exists(): - # # try to load the embeddings - # embedding_path = loading_dir / embedding_file_name - # # run some checks - # if embedding_path.exists(): - # logger.info(f"Loading embeddings from {embedding_path}") - # embeddings = torch.load(embedding_path, map_location="cpu") - # logger.warning( - # f"Index file `{index_path}` and embedding file `{embedding_path}` do not exist." - # ) - # else: - # logger.info(f"Loading index from {index_path}") - # index = faiss.read_index(str(embedding_path)) - - # return cls( - # documents=documents, - # embeddings=embeddings, - # index=index, - # device=device, - # **kwargs, - # ) - - def get_embeddings_from_index( - self, index: int - ) -> Union[torch.Tensor, numpy.ndarray]: - """ - Get the document vector from the index. - - Args: - index (`int`): - The index of the document. - - Returns: - `torch.Tensor`: The document vector. 
- """ - if self.embeddings is None: - raise ValueError( - "The documents must be indexed before they can be retrieved." - ) - if index >= self.embeddings.ntotal: - raise ValueError( - f"The index {index} is out of bounds. The maximum index is {self.embeddings.ntotal}." - ) - return self.embeddings.reconstruct(index) diff --git a/spaces/robin0307/MMOCR/configs/_base_/recog_models/master.py b/spaces/robin0307/MMOCR/configs/_base_/recog_models/master.py deleted file mode 100644 index 39eaef248e132f7ccd6675b63ba21ef41e350c3b..0000000000000000000000000000000000000000 --- a/spaces/robin0307/MMOCR/configs/_base_/recog_models/master.py +++ /dev/null @@ -1,61 +0,0 @@ -label_convertor = dict( - type='AttnConvertor', dict_type='DICT90', with_unknown=True) - -model = dict( - type='MASTER', - backbone=dict( - type='ResNet', - in_channels=3, - stem_channels=[64, 128], - block_cfgs=dict( - type='BasicBlock', - plugins=dict( - cfg=dict( - type='GCAModule', - ratio=0.0625, - n_head=1, - pooling_type='att', - is_att_scale=False, - fusion_type='channel_add'), - position='after_conv2')), - arch_layers=[1, 2, 5, 3], - arch_channels=[256, 256, 512, 512], - strides=[1, 1, 1, 1], - plugins=[ - dict( - cfg=dict(type='Maxpool2d', kernel_size=2, stride=(2, 2)), - stages=(True, True, False, False), - position='before_stage'), - dict( - cfg=dict(type='Maxpool2d', kernel_size=(2, 1), stride=(2, 1)), - stages=(False, False, True, False), - position='before_stage'), - dict( - cfg=dict( - type='ConvModule', - kernel_size=3, - stride=1, - padding=1, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU')), - stages=(True, True, True, True), - position='after_stage') - ], - init_cfg=[ - dict(type='Kaiming', layer='Conv2d'), - dict(type='Constant', val=1, layer='BatchNorm2d'), - ]), - encoder=None, - decoder=dict( - type='MasterDecoder', - d_model=512, - n_head=8, - attn_drop=0., - ffn_drop=0., - d_inner=2048, - n_layers=3, - feat_pe_drop=0.2, - feat_size=6 * 40), - loss=dict(type='TFLoss', reduction='mean'), - label_convertor=label_convertor, - max_seq_len=30) diff --git a/spaces/safi842/FashionGen/netdissect/upsegmodel/prroi_pool/functional.py b/spaces/safi842/FashionGen/netdissect/upsegmodel/prroi_pool/functional.py deleted file mode 100644 index 7dc7a8c282e846bd633c4fdc4190c4dca3da5a6f..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/netdissect/upsegmodel/prroi_pool/functional.py +++ /dev/null @@ -1,70 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# File : functional.py -# Author : Jiayuan Mao, Tete Xiao -# Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com -# Date : 07/13/2018 -# -# This file is part of PreciseRoIPooling. -# Distributed under terms of the MIT license. -# Copyright (c) 2017 Megvii Technology Limited. 
- -import torch -import torch.autograd as ag - -try: - from os.path import join as pjoin, dirname - from torch.utils.cpp_extension import load as load_extension - root_dir = pjoin(dirname(__file__), 'src') - _prroi_pooling = load_extension( - '_prroi_pooling', - [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')], - verbose=False - ) -except ImportError: - raise ImportError('Can not compile Precise RoI Pooling library.') - -__all__ = ['prroi_pool2d'] - - -class PrRoIPool2DFunction(ag.Function): - @staticmethod - def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale): - assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \ - 'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type()) - - pooled_height = int(pooled_height) - pooled_width = int(pooled_width) - spatial_scale = float(spatial_scale) - - features = features.contiguous() - rois = rois.contiguous() - params = (pooled_height, pooled_width, spatial_scale) - - if features.is_cuda: - output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params) - ctx.params = params - # everything here is contiguous. - ctx.save_for_backward(features, rois, output) - else: - raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') - - return output - - @staticmethod - def backward(ctx, grad_output): - features, rois, output = ctx.saved_tensors - grad_input = grad_coor = None - - if features.requires_grad: - grad_output = grad_output.contiguous() - grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params) - if rois.requires_grad: - grad_output = grad_output.contiguous() - grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params) - - return grad_input, grad_coor, None, None, None - - -prroi_pool2d = PrRoIPool2DFunction.apply - diff --git a/spaces/sagar-kris/harry-mack-bot/README.md b/spaces/sagar-kris/harry-mack-bot/README.md deleted file mode 100644 index c25f344c8697aed23af2913171f69c891434042e..0000000000000000000000000000000000000000 --- a/spaces/sagar-kris/harry-mack-bot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Harry Mack Bot -emoji: 🌖 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sandraw11031/virtual-staging/model.py b/spaces/sandraw11031/virtual-staging/model.py deleted file mode 100644 index 42fc726f9d3cb7aef8c095f1a8b259ab77f2fa05..0000000000000000000000000000000000000000 --- a/spaces/sandraw11031/virtual-staging/model.py +++ /dev/null @@ -1,180 +0,0 @@ -from collections import defaultdict -import matplotlib.pyplot as plt -import matplotlib.patches as mpatches -from matplotlib import cm - -import cv2 -from PIL import Image -import numpy as np - -import torch -from transformers import AutoImageProcessor, UperNetForSemanticSegmentation -from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation -from diffusers import StableDiffusionInpaintPipeline - - -class VirtualStagingToolV2(): - - def __init__(self, - segmentation_version='openmmlab/upernet-convnext-tiny', - diffusion_version="stabilityai/stable-diffusion-2-inpainting" - ): - - self.segmentation_version = segmentation_version - self.diffusion_version = diffusion_version - - if 
segmentation_version == "openmmlab/upernet-convnext-tiny": - self.feature_extractor = AutoImageProcessor.from_pretrained(self.segmentation_version) - self.segmentation_model = UperNetForSemanticSegmentation.from_pretrained(self.segmentation_version) - elif segmentation_version == "nvidia/segformer-b5-finetuned-ade-640-640": - self.feature_extractor = SegformerFeatureExtractor.from_pretrained(self.segmentation_version) - self.segmentation_model = SegformerForSemanticSegmentation.from_pretrained(self.segmentation_version) - - self.diffution_pipeline = StableDiffusionInpaintPipeline.from_pretrained( - self.diffusion_version, - torch_dtype=torch.float16, - ) - self.diffution_pipeline = self.diffution_pipeline.to("cuda") - - def _predict(self, image): - inputs = self.feature_extractor(images=image, return_tensors="pt") - outputs = self.segmentation_model(**inputs) - prediction = \ - self.feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0] - return prediction - - def _save_mask(self, img, prediction_array, mask_items=[]): - mask = np.zeros_like(prediction_array, dtype=np.uint8) - - mask[np.isin(prediction_array, mask_items)] = 0 - mask[~np.isin(prediction_array, mask_items)] = 255 - - buffer_size = 10 - - # Dilate the binary image - kernel = np.ones((buffer_size, buffer_size), np.uint8) - dilated_image = cv2.dilate(mask, kernel, iterations=1) - - # Subtract the original binary image - buffer_area = dilated_image - mask - - # Apply buffer area to the original image - mask = cv2.bitwise_or(mask, buffer_area) - - # # # Create a PIL Image object from the mask - mask_image = Image.fromarray(mask, mode='L') - # display(mask_image) - - # mask_image = mask_image.resize((512, 512)) - # mask_image.save(".tmp/mask_1.png", "PNG") - # img = img.resize((512, 512)) - # img.save(".tmp/input_1.png", "PNG") - return mask_image - - def _save_transparent_mask(self, img, prediction_array, mask_items=[]): - mask = np.array(img) - mask[~np.isin(prediction_array, mask_items), :] = 255 - mask_image = Image.fromarray(mask).convert('RGBA') - - # Set the transparency of the pixels corresponding to object 1 to 0 (fully transparent) - mask_data = mask_image.getdata() - mask_data = [(r, g, b, 0) if r == 255 else (r, g, b, 255) for (r, g, b, a) in mask_data] - mask_image.putdata(mask_data) - - return mask_image - - def get_mask(self, image_path=None, image=None): - if image_path: - image = Image.open(image_path) - else: - if not image: - raise ValueError("no image provided") - - # display(image) - prediction = self._predict(image) - - label_ids = np.unique(prediction) - - mask_items = [0, 3, 5, 8, 14] - - if 1 in label_ids or 25 in label_ids: - mask_items = [1, 2, 4, 25, 32] - room = 'backyard' - elif 73 in label_ids or 50 in label_ids or 61 in label_ids: - mask_items = [0, 3, 5, 8, 14, 50, 61, 71, 73, 118, 124, 129 - ] - room = 'kitchen' - elif 37 in label_ids or 65 in label_ids or (27 in label_ids and 47 in label_ids and 70 in label_ids): - mask_items = [0, 3, 5, 8, 14, 27, 65] - room = 'bathroom' - elif 7 in label_ids: - room = 'bedroom' - elif 23 in label_ids or 49 in label_ids: - mask_items = [0, 3, 5, 8, 14, 49] - room = 'living room' - elif 15 in label_ids and 19 in label_ids: - room = 'dining room' - else: - room ='room' - label_ids_without_mask = [i for i in label_ids if i not in mask_items] - - items = [self.segmentation_model.config.id2label[i] for i in label_ids_without_mask] - - mask_image = self._save_mask(image, prediction, mask_items) - transparent_mask_image = 
self._save_transparent_mask(image, prediction, mask_items) - return mask_image, transparent_mask_image, image, items, room - - def _edit_image(self, init_image, mask_image, prompt, # height, width, - number_images=1): - - init_image = init_image.resize((512, 512)).convert("RGB") - mask_image = mask_image.resize((512, 512)).convert("RGB") - - output_images = self.diffution_pipeline( - prompt=prompt, image=init_image, mask_image=mask_image, - # width=width, height=height, - num_images_per_prompt=number_images).images - # display(output_image) - return output_images - - def virtual_stage(self, image_path=None, image=None, style=None, - color_preference=None, additional_info=None, number_images=1): - mask_image, transparent_mask_image, init_image, items, room = self.get_mask(image_path, image) - if not style: - raise ValueError('style not provided.') - - - if room == 'kitchen': - items = [i for i in items if i in ['cabinet', 'shelf', 'counter', 'countertop', 'stool']] - elif room == 'bedroom': - items = [i for i in items if i in ['bed ', 'table', 'chest of drawers', 'desk', 'armchair', 'wardrobe']] - elif room == 'bathroom': - items = [i for i in items if - i in ['shower', 'bathtub', 'screen door', 'cabinet']] - elif room == 'living room': - items = [i for i in items if - i in ['table', 'sofa', 'chest of drawers', 'armchair', 'cabinet', 'coffee table']] - elif room == 'dining room': - items = [i for i in items if i in ['table', 'chair', 'cabinet']] - - items = ', '.join(items) - - if room == 'backyard': - prompt = f'Realistic, high resolution, {room} with {style}' - else: - prompt = f'Realistic {items}, high resolution, in the {style} style {room}' - - if color_preference: - prompt = f"{prompt} in {color_preference}" - - if additional_info: - prompt = f'{prompt}. 
{additional_info}' - print(prompt) - - output_images = self._edit_image(init_image, mask_image, prompt, number_images) - - final_output_images = [] - for output_image in output_images: - output_image = output_image.resize(init_image.size) - final_output_images.append(output_image) - return final_output_images, transparent_mask_image diff --git a/spaces/sbhatti2009/stock-analysis/app.py b/spaces/sbhatti2009/stock-analysis/app.py deleted file mode 100644 index 61b10181c186840c8454fe31638a328389f0b1ec..0000000000000000000000000000000000000000 --- a/spaces/sbhatti2009/stock-analysis/app.py +++ /dev/null @@ -1,117 +0,0 @@ -import gradio as gr -import nltk -import requests -from bs4 import BeautifulSoup -from fake_headers import Headers -from newspaper import Article -from nltk.tokenize import sent_tokenize -from summa.summarizer import summarize -from transformers import pipeline - -nltk.download("punkt") - - -def get_news(stock, n_results): - header_gen = Headers(headers=True) - headers = header_gen.generate() - - params = {"q": stock + " stock news"} - - response = requests.get( - "https://news.google.com/search", headers=headers, params=params - ) - soup = BeautifulSoup(response.text, "lxml") - - url_elems = soup.find_all(class_="VDXfz") - urls = [] - - for elem in url_elems: - urls.append("https://news.google.com" + elem["href"][1:]) - - articles = [] - total_results = 0 - - for url in urls: - if total_results == n_results: - break - try: - article = Article(url) - article.download() - article.parse() - article = article.text - - if len(article.split()) > 50: - articles.append(article) - total_results += 1 - except: - continue - - return articles - - -def gen_corpus(articles, stock, length): - raw_corpus = "" - - for article in articles: - article_sent = sent_tokenize(article) - - for sent in article_sent: - if len(sent.split()) > 5 and stock.lower() in sent.lower(): - raw_corpus += sent - - corpus = summarize(raw_corpus, words=length) - return corpus - - -def gen_summary(corpus, length): - summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6") - - min_length = int(0.8 * length) - max_length = int(1.2 * length) - raw_summary = summarizer( - corpus[:1024], min_length=min_length, max_length=max_length - )[0]["summary_text"] - - summary_sent = sent_tokenize(raw_summary) - summary = "" - - for sent in summary_sent: - summary += f"-{sent}\n" - - return summary - - -def gen_sentiment(corpus): - analyzer = pipeline("sentiment-analysis", model="ProsusAI/finbert") - raw_sentiment = analyzer(corpus[:512], return_all_scores=True) - - sentiment = {} - - for sentiment_dict in raw_sentiment[0]: - label = sentiment_dict["label"] - value = sentiment_dict["score"] - - if label == "positive": - label = "bull" - elif label == "negative": - label = "bear" - else: - label = "neutral" - - sentiment[label] = value - - return sentiment - - -def gen_report(stock, length): - news = get_news(stock, length / 5) - corpus = gen_corpus(news, stock, length * 5) - summary = gen_summary(corpus, length) - sentiment = gen_sentiment(corpus) - - return summary, sentiment - - -iface = gr.Interface( - fn=gen_report, inputs=["text", "number"], outputs=["text", "label"], theme="default" -).launch() diff --git a/spaces/scedlatioru/img-to-music/example/Escape Rosecliff Island Activation Code [Patch] !FREE!.md b/spaces/scedlatioru/img-to-music/example/Escape Rosecliff Island Activation Code [Patch] !FREE!.md deleted file mode 100644 index 
a7e2ef631cb231a6838bc7925b994a74fe0e6d62..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Escape Rosecliff Island Activation Code [Patch] !FREE!.md +++ /dev/null @@ -1,15 +0,0 @@ -

      Escape Rosecliff Island Activation Code [Patch]


      Download Zip --->>> https://gohhs.com/2uEAw1



      - -For the best results, select topic, platform and/or keywords ; How to redeem products and promo codes - Wed, 12 May 2021 ; Buying EA games: ordering and ... 1 - How to book a game? -2 - How to buy the game? -3 - How to activate the key? -4 - What are my player rights? -5 - What happens if the game is not activated? -How to get the most out of purchased games. -In this article: Buying games in the Origin store; Checking the game's status; Registering a copy -How to use ... -To avoid repeating the same procedure several times, choose the best option for you. -How to buy a digital copy of a game on Origin ... 8a78ff9644
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/FSX - PMDG 777 All PMDG Service Packs Crack.rar Utorrent ((INSTALL)).md b/spaces/scedlatioru/img-to-music/example/FSX - PMDG 777 All PMDG Service Packs Crack.rar Utorrent ((INSTALL)).md deleted file mode 100644 index 501ac696509b65dae1e92f881d42236fd2270f75..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/FSX - PMDG 777 All PMDG Service Packs Crack.rar Utorrent ((INSTALL)).md +++ /dev/null @@ -1,6 +0,0 @@ -

      FSX - PMDG 777 All PMDG Service Packs Crack.rar utorrent


      Downloadhttps://gohhs.com/2uEA3B



      -
      - d5da3c52bf
      -
      -
      -

      diff --git a/spaces/scedlatioru/img-to-music/example/Spud Exit Pursued By A Bear Ebook Free [Extra Quality] Download.md b/spaces/scedlatioru/img-to-music/example/Spud Exit Pursued By A Bear Ebook Free [Extra Quality] Download.md deleted file mode 100644 index 423b9864a08a6d8547c00fe2df8b973c935b7be2..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Spud Exit Pursued By A Bear Ebook Free [Extra Quality] Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Spud Exit Pursued By A Bear Ebook Free Download


      Download Filehttps://gohhs.com/2uEA2C



      -
      -Book Club Books, Good Books, Books To Read, My Books, Page Turner. More information. Spud ... Spud: Exit, Pursued by a Bear - John van de Ruit Ya Books,. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/sdhsdhk/bingosjj/src/lib/storage.ts b/spaces/sdhsdhk/bingosjj/src/lib/storage.ts deleted file mode 100644 index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000 --- a/spaces/sdhsdhk/bingosjj/src/lib/storage.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { getMany, set, del, clear } from 'idb-keyval'; - -export const Storage = { - async get(key: string | string[] | null): Promise { - if (key === null) return null; - if (typeof key === 'string') { - key = [key] - } - const returnData: Record = {} - const values = await getMany(key) - key.forEach((k, idx)=> { - returnData[k] = values[idx] - }) - return returnData; - }, - async set(object: any) { - for (let key of Object.keys(object)) { - await set(key, object[key]) - } - }, - async remove(key: string) { - return del(key); - }, - async clear() { - return clear(); - } -} diff --git a/spaces/sdutta28/AggDetectApp/components/get_predictions.py b/spaces/sdutta28/AggDetectApp/components/get_predictions.py deleted file mode 100644 index 428f0d7eca25cad9bd9ba55e09c9c1c96922c171..0000000000000000000000000000000000000000 --- a/spaces/sdutta28/AggDetectApp/components/get_predictions.py +++ /dev/null @@ -1,77 +0,0 @@ -import components.utils as utils -from components.config import app_config -from components.models import ( - pipeline_task_A, - pipeline_task_B, - explainer_task_A, - explainer_task_B, -) -from lime.lime_text import LimeTextExplainer -from typing import Any -from matplotlib.figure import Figure - - -def predict_for_pipeline( - model_pipeline: Any, - explainer: LimeTextExplainer, - cleaned_data: list[str], - labels: list, -) -> tuple[int, Figure | None]: - """Generates Prediction and Explanation given the cleaned text - - Args: - model_pipeline (Any): Joblib imported model pipeline - explainer (LimeTextExplainer): text explainer - cleaned_data (list[str]): cleaned text - labels(list): list of integers as labels - - Returns: - tuple[int, Figure]: class prediction and LIME explanation as matplotlib figure - """ - - explanation = explainer.explain_instance( - cleaned_data[0], - model_pipeline.predict_proba, - num_features=app_config.NUM_EXPLAINER_FEATURES, - labels=labels, - ) - - class_prediction = model_pipeline.predict(cleaned_data)[0] - return class_prediction, explanation.as_pyplot_figure(label=1) - - -def get_predictions(text: str) -> tuple: - """Gets Predictions for the Texts - - Args: - text (str): The input text to get predictions for - - Returns: - tuple[str, Any]: Predictions for task A and task B - along with Figures - """ - - cleaned_data = [utils.clean_one_text(text)] - - prediction_task_A = predict_for_pipeline( - pipeline_task_A, - explainer_task_A, - cleaned_data, - [0, 1, 2], - ) - prediction_task_B = predict_for_pipeline( - pipeline_task_B, - explainer_task_B, - cleaned_data, - [0, 1], - ) - - print(prediction_task_A) - print(prediction_task_B) - - return ( - app_config.TASK_A_MAP[prediction_task_A[0]], - app_config.TASK_B_MAP[prediction_task_B[0]], - prediction_task_A[1], - prediction_task_B[1], - ) diff --git a/spaces/seok07/Voice-Changer1/README.md b/spaces/seok07/Voice-Changer1/README.md deleted file mode 100644 index 14227ce76449f67d83b66a156b74589fbf3b2c3d..0000000000000000000000000000000000000000 --- a/spaces/seok07/Voice-Changer1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: VoiceChange -emoji: 👀 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.28.3 -app_file: app_multi.py -pinned: false -license: mit -duplicated_from: 
kevinwang676/Voice-Changer ---- diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/i18n.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/i18n.py deleted file mode 100644 index 1d7fe71d0e443a90492ff033ee34460e3429379f..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/i18n.py +++ /dev/null @@ -1,25 +0,0 @@ -import locale -import json -import os - - -def load_language_list(language): - with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f: - language_list = json.load(f) - return language_list - - -class I18nAuto: - def __init__(self, language=None): - if language in ["Auto", None]: - language = locale.getdefaultlocale()[ - 0 - ] # getlocale can't identify the system's language ((None, None)) - if not os.path.exists(f"./i18n/{language}.json"): - language = "en_US" - self.language = language - print("Use Language:", language) - self.language_map = load_language_list(language) - - def __call__(self, key): - return self.language_map.get(key, key) diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/trainset_preprocess_pipeline_print.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/trainset_preprocess_pipeline_print.py deleted file mode 100644 index 0e15bc4e7498223d7c7767f3b0840f2d9351059a..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/trainset_preprocess_pipeline_print.py +++ /dev/null @@ -1,135 +0,0 @@ -import sys, os, multiprocessing -from scipy import signal - -now_dir = os.getcwd() -sys.path.append(now_dir) - -inp_root = sys.argv[1] -sr = int(sys.argv[2]) -n_p = int(sys.argv[3]) -exp_dir = sys.argv[4] -noparallel = sys.argv[5] == "True" -import numpy as np, os, traceback -from slicer2 import Slicer -import librosa, traceback -from scipy.io import wavfile -import multiprocessing -from my_utils import load_audio - -mutex = multiprocessing.Lock() -f = open("%s/preprocess.log" % exp_dir, "a+") - - -def println(strr): - mutex.acquire() - print(strr) - f.write("%s\n" % strr) - f.flush() - mutex.release() - - -class PreProcess: - def __init__(self, sr, exp_dir): - self.slicer = Slicer( - sr=sr, - threshold=-40, - min_length=800, - min_interval=400, - hop_size=15, - max_sil_kept=150, - ) - self.sr = sr - self.bh, self.ah = signal.butter(N=5, Wn=48, btype="high", fs=self.sr) - self.per = 3.7 - self.overlap = 0.3 - self.tail = self.per + self.overlap - self.max = 0.95 - self.alpha = 0.8 - self.exp_dir = exp_dir - self.gt_wavs_dir = "%s/0_gt_wavs" % exp_dir - self.wavs16k_dir = "%s/1_16k_wavs" % exp_dir - os.makedirs(self.exp_dir, exist_ok=True) - os.makedirs(self.gt_wavs_dir, exist_ok=True) - os.makedirs(self.wavs16k_dir, exist_ok=True) - - def norm_write(self, tmp_audio, idx0, idx1): - tmp_audio = (tmp_audio / np.abs(tmp_audio).max() * (self.max * self.alpha)) + ( - 1 - self.alpha - ) * tmp_audio - wavfile.write( - "%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1), - self.sr, - tmp_audio.astype(np.float32), - ) - tmp_audio = librosa.resample( - tmp_audio, orig_sr=self.sr, target_sr=16000 - ) # , res_type="soxr_vhq" - wavfile.write( - "%s/%s_%s.wav" % (self.wavs16k_dir, idx0, idx1), - 16000, - tmp_audio.astype(np.float32), - ) - - def pipeline(self, path, idx0): - try: - audio = 
load_audio(path, self.sr) - # zero phased digital filter cause pre-ringing noise... - # audio = signal.filtfilt(self.bh, self.ah, audio) - audio = signal.lfilter(self.bh, self.ah, audio) - - idx1 = 0 - for audio in self.slicer.slice(audio): - i = 0 - while 1: - start = int(self.sr * (self.per - self.overlap) * i) - i += 1 - if len(audio[start:]) > self.tail * self.sr: - tmp_audio = audio[start : start + int(self.per * self.sr)] - self.norm_write(tmp_audio, idx0, idx1) - idx1 += 1 - else: - tmp_audio = audio[start:] - idx1 += 1 - break - self.norm_write(tmp_audio, idx0, idx1) - println("%s->Suc." % path) - except: - println("%s->%s" % (path, traceback.format_exc())) - - def pipeline_mp(self, infos): - for path, idx0 in infos: - self.pipeline(path, idx0) - - def pipeline_mp_inp_dir(self, inp_root, n_p): - try: - infos = [ - ("%s/%s" % (inp_root, name), idx) - for idx, name in enumerate(sorted(list(os.listdir(inp_root)))) - ] - if noparallel: - for i in range(n_p): - self.pipeline_mp(infos[i::n_p]) - else: - ps = [] - for i in range(n_p): - p = multiprocessing.Process( - target=self.pipeline_mp, args=(infos[i::n_p],) - ) - p.start() - ps.append(p) - for p in ps: - p.join() - except: - println("Fail. %s" % traceback.format_exc()) - - -def preprocess_trainset(inp_root, sr, n_p, exp_dir): - pp = PreProcess(sr, exp_dir) - println("start preprocess") - println(sys.argv) - pp.pipeline_mp_inp_dir(inp_root, n_p) - println("end preprocess") - - -if __name__ == "__main__": - preprocess_trainset(inp_root, sr, n_p, exp_dir) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/BlueStacks 5 old version download - Enjoy your favorite Android games on your PC.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/BlueStacks 5 old version download - Enjoy your favorite Android games on your PC.md deleted file mode 100644 index 2fe325b864adb0c96cf84a147fd0b74ede184d7a..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/BlueStacks 5 old version download - Enjoy your favorite Android games on your PC.md +++ /dev/null @@ -1,135 +0,0 @@ - -

      Bluestacks 5 Old Version Download: How to Run Android Apps on PC

      -

      Do you want to run Android apps on your PC without using a virtual machine or dual booting? If so, you might want to try Bluestacks, one of the most popular and reliable Android emulators for Windows and Mac computers. In this article, we will show you how to download and install Bluestacks 5 old version on your PC, how to use it to run Android apps, and some of the best Bluestacks alternatives you can try.

      -

      bluestacks 5 old version download


      Download ››› https://ssurll.com/2uNWbC



      -

      What is Bluestacks and why use it?

      -

      Bluestacks is an Android emulator that allows you to run Android apps on your computer. It creates a virtual environment that mimics an Android device, so you can access the Google Play Store, install apps, and use them as if you were using a smartphone or tablet. You can also sync your data between your PC and your mobile device using cloud services.

      -

      There are many reasons why you might want to use Bluestacks on your PC. Some of them are:

      -

      Bluestacks features and benefits include:

      -
        -
      • Easy app installation from Google Play Store

        -

        You can install any Android app directly from the Google Play Store within Bluestacks, just like you would on your phone. You don't need to download APK files or sideload apps from other sources. You can also update your apps automatically or manually.

      • Integration with PC devices and peripherals

        -

        Bluestacks integrates seamlessly with your computer's devices and peripherals, such as mouse, keyboard, webcam, microphone, speakers, etc. You can use them to control your apps, chat, video call, record, stream, etc. You can also adjust the resolution, orientation, and display settings of your apps according to your PC screen.

      • High performance and smooth gameplay

        -

        Bluestacks is designed to deliver high performance and smooth gameplay for Android apps on your PC. It uses advanced technologies such as Hyper-G Graphics, AI-driven resource management, and multi-core processing to optimize the speed, stability, and compatibility of your apps. You can also enable the Eco Mode to save battery and CPU usage.

        -

      • -
• Root access and customization options

  If you want to have more control and flexibility over your Android apps, you can root Bluestacks and access advanced features and settings. You can install root apps, change the system files, modify the app permissions, etc. You can also customize the interface and appearance of Bluestacks according to your preference.

      How to download and install Bluestacks 5 old version on your PC


      If you are interested in using Bluestacks 5 old version on your PC, you can follow these simple steps:

1. Visit the official website or a trusted source and click on "Download Bluestacks 5"

   You can download Bluestacks 5 old version from the official website or from a trusted source. Make sure you choose the right version for your operating system (Windows or Mac). The download size is about 500 MB. It is also worth verifying the file before you run it — see the checksum sketch after this list.

2. Launch the installer and follow the instructions

   Once you have downloaded the installer, double-click on it and follow the instructions on the screen. You can choose the installation location, language, and shortcuts. The installation process may take a few minutes depending on your PC specifications.

3. Choose the Android version you want to use (Nougat, Pie, or Android 11)

   After the installation is complete, you can choose which Android version you want to use on Bluestacks 5 old version. You can select from Nougat (Android 7), Pie (Android 9), or Android 11. Each version has its own advantages and disadvantages in terms of compatibility, performance, and features. You can switch between them anytime from the settings menu.

4. Log in with your Google account and start using Bluestacks 5

   The last step is to log in with your Google account on Bluestacks 5 old version. This will allow you to access the Google Play Store, sync your data, and use Google services. You can also skip this step if you don't want to use a Google account. Now you are ready to start using Bluestacks 5 old version on your PC.
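
As a quick safety habit for step 1, you can verify that the installer you downloaded is intact by comparing its SHA-256 checksum with the one published by the download site. Below is a minimal Python sketch; the file name and the expected hash are placeholders you would replace with your own values.

```python
import hashlib

# Placeholders: substitute your actual installer name and the checksum
# published by the site you downloaded it from.
INSTALLER = "BlueStacksInstaller.exe"
EXPECTED_SHA256 = "0000000000000000000000000000000000000000000000000000000000000000"

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in 1 MB chunks so large installers never sit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of(INSTALLER)
if digest == EXPECTED_SHA256.lower():
    print("Checksum matches - the download is intact.")
else:
    print("Checksum mismatch - do not run this installer. Got:", digest)
```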

      How to use Bluestacks 5 to run Android apps on your PC


      Using Bluestacks 5 old version to run Android apps on your PC is very easy and intuitive. Here are some tips on how to do it:

1. Search for the app you want to install from the Google Play Store or App Center

   You can find any Android app you want from the Google Play Store or App Center within Bluestacks 5 old version. You can browse by categories, genres, ratings, etc. You can also use the search bar to type the name of the app or keywords related to it.

2. Click on the app icon and install it on Bluestacks 5

   Once you have found the app you want, click on its icon and then click on "Install" or "Get". The app will be downloaded and installed on Bluestacks 5 old version. You can see the progress of the installation on the notification bar. You can also cancel or pause the installation if you want.

3. Launch the app and enjoy it on your PC screen

   After the app is installed, you can launch it by clicking on its icon on the home screen or in the app drawer of Bluestacks 5 old version. The app will open in a separate window that you can resize, move, or minimize as you wish. You can also switch between full-screen and windowed mode by pressing F11.

4. Customize the controls, settings, and performance modes according to your preference

   You can customize the controls, settings, and performance modes of your apps on Bluestacks 5 old version according to your preference. You can use the keyboard, mouse, gamepad, or touchpad to control your apps. You can also assign keyboard shortcuts, macros, or gestures to perform certain actions. You can change the settings of your apps, such as the language, notifications, permissions, etc. You can also choose the performance mode of your apps, such as balanced, high FPS, or eco mode.

      Bluestacks alternatives and competitors


      If you are looking for other options to run Android apps on your PC, you can try these Bluestacks alternatives that are also popular and reliable Android emulators:

• Nox Player: A feature-rich and powerful Android emulator with gamepad support and root access

  Nox Player is a feature-rich and powerful Android emulator that lets you run Android apps and games on your PC with ease. It supports gamepad, keyboard, mouse, and touch controls. It also has root access and allows you to customize the interface and settings. It supports multiple instances and has a built-in video recorder and screenshot tool.

• MEmu Play: A fast and stable Android emulator with multiple instances and performance modes

  MEmu Play is a fast and stable Android emulator that lets you run Android apps and games on your PC with high performance. It supports multiple instances and allows you to run different apps or games simultaneously. It also has various performance modes, such as smart mode, speed mode, and compatibility mode. It supports keyboard, mouse, gamepad, and touch controls.

• LDPlayer: A new and optimized Android emulator with high FPS and compatibility

  LDPlayer is a new and optimized Android emulator that lets you run Android apps and games on your PC with high FPS and compatibility. It supports keyboard, mouse, gamepad, and touch controls. It also has a built-in app store and a game center. It supports multiple instances and has a smart keymapping tool.

      Conclusion


      In conclusion, Bluestacks 5 old version is one of the best Android emulators that lets you run Android apps on your PC with ease and efficiency. You can download and install Bluestacks 5 old version from the official website or a trusted source. You can choose from different Android versions, customize the controls and settings, and enjoy a smooth gameplay experience. You can also try other Bluestacks alternatives if you want to explore more options.


      We hope this article has helped you learn how to download and use Bluestacks 5 old version on your PC. If you have any questions or feedback, please feel free to leave a comment below.


      Frequently Asked Questions (FAQs)

• Is Bluestacks 5 old version safe to use?

  Bluestacks 5 old version is safe to use as long as you download it from the official website or a trusted source. It does not contain any malware or viruses that can harm your PC. However, you should always be careful when installing apps from unknown sources or granting permissions to apps that may access your personal data.

• Is Bluestacks 5 old version free to use?

  Bluestacks 5 old version is free to use for personal use. However, it may display some ads or promotions from time to time. You can also upgrade to the premium version of Bluestacks 5 old version for a monthly or yearly fee that will remove the ads and unlock some exclusive features.

• How can I update Bluestacks 5 old version?

  You can update Bluestacks 5 old version by clicking on the "Check for updates" button on the home screen or in the settings menu. You can also enable the automatic update option that will notify you when a new version is available. You can then download and install the update with one click.

• How can I uninstall Bluestacks 5 old version?

  You can uninstall Bluestacks 5 old version by following these steps:

  1. Close Bluestacks 5 old version and any apps running on it.
  2. Go to the Control Panel or Settings on your PC and click on "Uninstall a program" or "Apps and features".
  3. Find and select Bluestacks 5 old version from the list of programs and click on "Uninstall" or "Remove".
  4. Follow the instructions on the screen to complete the uninstallation process.
  5. Restart your PC to remove any leftover files or registry entries.

• What are the system requirements for Bluestacks 5 old version?

  The minimum system requirements for Bluestacks 5 old version are:

  • Operating system: Windows 7 or higher, Mac OS X 10.12 or higher
  • Processor: Intel or AMD dual-core processor
  • Memory: 2 GB RAM
  • Disk space: 5 GB free space
  • Graphics: Intel HD Graphics 520 or higher, Nvidia GeForce 820M or higher, AMD Radeon R5 M330 or higher
  • Internet connection: Broadband or Wi-Fi

  The recommended system requirements for Bluestacks 5 old version are:

  • Operating system: Windows 10, Mac OS X 10.15 or higher
  • Processor: Intel or AMD quad-core processor
  • Memory: 4 GB RAM or higher
  • Disk space: 10 GB free space or higher
  • Graphics: Intel HD Graphics 620 or higher, Nvidia GeForce GTX 1050 or higher, AMD Radeon RX 560 or higher
  • Internet connection: Broadband or Wi-Fi

\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download DJ Aint My Fault X Tokyo Drift for Free - The Most Popular Song on TikTok and YouTube.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download DJ Aint My Fault X Tokyo Drift for Free - The Most Popular Song on TikTok and YouTube.md
deleted file mode 100644
index 7fce88b2872a5a10f7e49ca0595940924711d29b..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download DJ Aint My Fault X Tokyo Drift for Free - The Most Popular Song on TikTok and YouTube.md
+++ /dev/null
@@ -1,165 +0,0 @@

      How to Download DJ Aint My Fault X Tokyo Drift Remix


      If you are a fan of EDM music, you may have heard of DJ Aint My Fault X Tokyo Drift Remix, a viral song that combines two popular tracks: Aint My Fault by Zara Larsson and Tokyo Drift by Teriyaki Boyz. This remix is catchy, energetic, and perfect for dancing or working out. But how can you download it from YouTube and enjoy it offline? In this article, we will show you how to do that legally and safely, as well as how to make the most of this awesome remix.


      What is DJ Aint My Fault X Tokyo Drift Remix?


      DJ Aint My Fault X Tokyo Drift Remix is a mashup song created by DJ NWP, a talented producer who specializes in EDM, jungle dutch, and breakbeat music. He uploaded the remix on his YouTube channel on June 19, 2022, and it has since gained over 267,000 views and 10,000 Shazams.


      The origin and popularity of the song


      The remix combines two songs that are already well-known and loved by many people. Aint My Fault is a pop song by Swedish singer Zara Larsson, released in 2016 as the lead single from her second studio album So Good. It peaked at number 13 on the UK Singles Chart and number 30 on the US Billboard Hot 100.


      Tokyo Drift is a hip hop song by Japanese group Teriyaki Boyz, released in 2006 as part of the soundtrack for The Fast and the Furious: Tokyo Drift, the third installment in the Fast & Furious franchise. It became an iconic song for the movie and the car racing culture, reaching number one on the iTunes hip hop chart.


      By mixing these two songs together, DJ NWP created a unique blend of pop and hip hop, with a touch of EDM flair. The remix has a fast tempo, a catchy chorus, and a powerful bass that makes you want to move your body. It also features some sound effects from the movie, such as car engines revving and tires screeching, adding to the excitement and adrenaline of the song.


      The features and benefits of the remix


      One of the main reasons why DJ Aint My Fault X Tokyo Drift Remix is so popular is because it offers many features and benefits for its listeners. Here are some of them:

• It is free to listen to on YouTube or other streaming platforms.
• It is easy to download from YouTube using various methods (more on that later).
• It is compatible with any device that can play MP3 files.
• It is suitable for any occasion, whether you want to party, work out, or relax.
• It is fun to sing along to or dance to with your friends.
• It is a great way to discover new music from different genres and cultures.

      How to download music from YouTube legally and safely


      Now that you know what DJ Aint My Fault X Tokyo Drift Remix is and why it is so awesome, you may be wondering how to download it from YouTube and listen to it offline. Well, there are several ways to do that, but not all of them are legal and safe. In this section, we will explain how to download music from YouTube legally and safely, and compare the pros and cons of each method.


      The official way: YouTube Music Premium or YouTube Premium


      The official way to download music from YouTube is to subscribe to YouTube Music Premium or YouTube Premium, which are paid services that allow you to enjoy ad-free music and videos, as well as download them for offline playback.


      YouTube Music Premium costs $9.99 per month, and gives you access to a huge library of songs, albums, playlists, and remixes, including DJ Aint My Fault X Tokyo Drift Remix. You can also create your own playlists, discover new music, and listen to music in the background while using other apps.


      YouTube Premium costs $11.99 per month, and gives you all the benefits of YouTube Music Premium, plus the ability to download any video from YouTube, watch original shows and movies, and play videos in the background or with the screen off.


      To download music from YouTube using these services, you need to follow these steps:

1. Open the YouTube app on your device and sign in with your Google account.
2. Search for DJ Aint My Fault X Tokyo Drift Remix or any other song you want to download.
3. Tap on the song and then tap on the download icon (a downward arrow) below the video.
4. Select the audio quality you prefer (low, medium, or high) and tap OK.
5. Wait for the download to finish and then go to your library to find your downloaded music.

      The pros of this method are:

• It is legal and authorized by YouTube and the music owners.
• It is easy and convenient to use.
• It offers high-quality audio and video downloads.
• It supports offline playback on any device that has the YouTube app installed.

      The cons of this method are:

• It is expensive and requires a monthly subscription fee.
• It does not allow you to transfer the downloaded music to other devices or apps.
• It does not work if you cancel your subscription or lose your internet connection.

      The unofficial ways: 4K Video Downloader, MediaHuman, or online converters


      The unofficial ways to download music from YouTube are to use third-party software or websites that can extract the audio from YouTube videos and save them as MP3 files. Some of the most popular ones are 4K Video Downloader, MediaHuman, and online converters such as Y2Mate or MP3Juices.


      To download music from YouTube using these tools, you need to follow these steps:

1. Open your web browser and go to YouTube.com.
2. Search for DJ Aint My Fault X Tokyo Drift Remix or any other song you want to download.
3. Copy the URL of the song from the address bar.
4. Open another tab and go to the website or software of your choice (e.g., 4K Video Downloader).
5. Paste the URL of the song into the input box and click on the download button.
6. Select the format and quality you prefer (e.g., MP3, 320 kbps) and click OK.
7. Wait for the download to finish and then go to your downloads folder to find your downloaded music. (For a scriptable alternative, see the sketch after this list.)
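
As a side note for readers comfortable with the command line, the open-source yt-dlp project offers a scriptable way to do the same audio extraction. The sketch below uses its Python API and assumes yt-dlp and FFmpeg are installed; the URL is a placeholder, and the same legal caveats apply — only download material you have the rights to.

```python
# Assumes: pip install yt-dlp, with FFmpeg available for the MP3 conversion.
from yt_dlp import YoutubeDL

url = "https://www.youtube.com/watch?v=VIDEO_ID"  # placeholder URL

options = {
    "format": "bestaudio/best",          # pick the best available audio stream
    "outtmpl": "%(title)s.%(ext)s",      # name the output file after the video title
    "postprocessors": [{                 # hand the stream to FFmpeg for MP3 conversion
        "key": "FFmpegExtractAudio",
        "preferredcodec": "mp3",
        "preferredquality": "320",
    }],
}

with YoutubeDL(options) as ydl:
    ydl.download([url])
```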

      The pros of this method are:

• It is free and does not require a subscription fee.
• It allows you to transfer the downloaded music to other devices or apps.
• It works even if you lose your internet connection or delete the original video.

      The cons of this method are:

• It is illegal and violates YouTube's terms of service and the music owners' rights.
• It is risky and may expose your device to malware, viruses, or pop-up ads.
• It offers low-quality audio and video downloads.
• It does not support offline playback on devices that do not have an MP3 player installed.

      The pros and cons of each method


To summarize, here is a table that compares the pros and cons of each method:

| Method | Pros | Cons |
| --- | --- | --- |
| YouTube Music Premium / YouTube Premium (official) | Legal and authorized; easy and convenient; high-quality downloads; offline playback in the YouTube app | Monthly subscription fee; downloads cannot be transferred to other devices or apps; stops working if you cancel your subscription or lose your connection |
| 4K Video Downloader, MediaHuman, or online converters (unofficial) | Free; files can be transferred to other devices or apps; works even after the original video is deleted | Violates YouTube's terms of service and the music owners' rights; risk of malware, viruses, or pop-up ads; lower quality; needs a separate MP3 player app |

How to enjoy DJ Aint My Fault X Tokyo Drift Remix offline

      Once you have downloaded DJ Aint My Fault X Tokyo Drift Remix from YouTube using your preferred method, you may want to enjoy it offline on your device. In this section, we will show you how to do that, as well as how to create a playlist with the remix and other songs, and how to share the remix with your friends and social media followers.


      How to transfer the downloaded music to your device


      If you have downloaded the remix using YouTube Music Premium or YouTube Premium, you can simply open the YouTube app on your device and go to your library to find your downloaded music. You can play it offline as long as you have an active subscription and an internet connection at least once every 30 days.


      If you have downloaded the remix using 4K Video Downloader, MediaHuman, or online converters, you can transfer it to your device using a USB cable or a cloud service. Here are the steps for each option:

• Using a USB cable: Connect your device to your computer using a USB cable. Open the folder where you saved the downloaded music on your computer. Drag and drop the music file to your device's music folder. Disconnect your device and open your music player app to find and play the music. (If you use adb, this step can also be scripted — see the sketch after this list.)
• Using a cloud service: Upload the downloaded music file to a cloud service such as Google Drive, Dropbox, or OneDrive. Open the cloud service app on your device and download the music file to your device's storage. Open your music player app to find and play the music.
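
If you have Android's adb tool installed and USB debugging enabled on your device, the USB-cable transfer can be scripted. A minimal sketch, assuming adb is on your PATH and the file name is a placeholder:

```python
import subprocess

local_file = "dj-aint-my-fault-x-tokyo-drift.mp3"  # placeholder file name
device_dir = "/sdcard/Music/"                      # standard music folder on most devices

# "adb push <local> <remote>" copies a file to the connected Android device.
result = subprocess.run(
    ["adb", "push", local_file, device_dir],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```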

      How to create a playlist with the remix and other songs


      If you want to create a playlist with DJ Aint My Fault X Tokyo Drift Remix and other songs that match its vibe and style, you can use any music player app that allows you to create custom playlists. Here are some suggestions for apps and songs:

• Spotify: Spotify is one of the most popular music streaming apps that offers millions of songs, podcasts, and playlists. You can create your own playlists by adding songs from Spotify's library or from your own device. You can also follow other users' playlists or discover new ones based on your preferences. Some of the songs that go well with DJ Aint My Fault X Tokyo Drift Remix are: See You Again by Wiz Khalifa feat. Charlie Puth, Lean On by Major Lazer feat. MØ and DJ Snake, Don't Let Me Down by The Chainsmokers feat. Daya, and Rockabye by Clean Bandit feat. Sean Paul and Anne-Marie.
• Apple Music: Apple Music is another popular music streaming app that offers millions of songs, podcasts, and playlists. You can create your own playlists by adding songs from Apple Music's library or from your own device. You can also follow other users' playlists or discover new ones based on your preferences. Some of the songs that go well with DJ Aint My Fault X Tokyo Drift Remix are: Bad Guy by Billie Eilish, Blinding Lights by The Weeknd, Dance Monkey by Tones and I, and Senorita by Shawn Mendes and Camila Cabello.
• Pandora: Pandora is a unique music streaming app that creates personalized radio stations based on your favorite songs, artists, or genres. You can create your own stations by entering a song name or an artist name, and Pandora will play similar songs that match your taste. You can also rate the songs with a thumbs up or a thumbs down to refine your station. Some of the songs that go well with DJ Aint My Fault X Tokyo Drift Remix are: Gasolina by Daddy Yankee, Despacito by Luis Fonsi feat. Daddy Yankee, Shape of You by Ed Sheeran, and Cheap Thrills by Sia feat. Sean Paul.

      How to share the remix with your friends and social media followers


      If you love DJ Aint My Fault X Tokyo Drift Remix so much that you want to share it with your friends and social media followers, you can do that easily using any of these methods:

• Send them a link to the remix video on YouTube via text message, email, or any messaging app.
• Post a link to the remix video on YouTube on your Facebook, Twitter, Instagram, or any other social media platform.
• Make a TikTok video with the remix as the background music and show off your dance moves or lip-sync skills.
• Use the remix as your ringtone or alarm tone and let your friends hear it when they call you or when you wake up.
• Play the remix on your speakers or headphones and invite your friends to join you for a dance party or a workout session.

      Conclusion


      DJ Aint My Fault X Tokyo Drift Remix is a fantastic song that combines two popular tracks from different genres and cultures. It is catchy, energetic, and fun to listen to and dance to. You can download it from YouTube using various methods, but you should be aware of the legal and safety issues involved. You can also enjoy it offline on your device, create a playlist with it and other songs, and share it with your friends and social media followers. If you haven't heard it yet, you should definitely check it out and see for yourself why it is so viral. Here is a link to the remix video on YouTube:


      FAQs


      Here are some frequently asked questions about DJ Aint My Fault X Tokyo Drift Remix:

1. Who is DJ NWP?

   DJ NWP is a producer from Indonesia who makes EDM, jungle dutch, and breakbeat music. He has over 1.3 million subscribers on his YouTube channel, where he uploads his original songs and remixes. He also performs live shows and events in Indonesia and abroad.

2. What is the difference between a remix and a mashup?

   A remix is a song that has been altered or modified by adding, removing, or changing elements such as the vocals, instruments, beats, or effects. A mashup is a song that has been created by combining two or more songs into one, usually by blending the vocals of one song with the instrumental of another.

3. How can I support DJ NWP and his music?

   You can support DJ NWP and his music by subscribing to his YouTube channel, liking and commenting on his videos, sharing his music with your friends, following him on his social media accounts, buying his merchandise, or donating to his PayPal account. You can find all the links to his online platforms on his YouTube channel's about page.

4. What are some other popular remixes or mashups that I can listen to?

   Some other popular remixes or mashups that you can listen to are: Old Town Road Remix by Lil Nas X feat. Billy Ray Cyrus, Closer x All Star by The Chainsmokers feat. Halsey and Smash Mouth, Shape of You x Despacito by Ed Sheeran, Luis Fonsi, and Daddy Yankee, and Girls Like You x One Kiss by Maroon 5 feat. Cardi B and Calvin Harris feat. Dua Lipa.

5. How can I learn how to make my own remixes or mashups?

   You can learn how to make your own remixes or mashups by watching tutorials on YouTube or other online platforms, reading blogs or articles on music production, taking online courses or classes on music software or apps, or joining online communities or forums where you can get feedback and tips from other music makers.

\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Subway Surfers with Hack APK 1.80.1 Version.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Subway Surfers with Hack APK 1.80.1 Version.md
deleted file mode 100644
index a16c07c87a253ece4a3d14af4f541a64de26ed86..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy Subway Surfers with Hack APK 1.80.1 Version.md
+++ /dev/null
@@ -1,83 +0,0 @@

      Subway Surfers Hack APK Download 1.80.1: Everything You Need to Know


      Subway Surfers is one of the most popular endless running games on Android, iOS, and Windows devices. The game features colorful graphics, smooth controls, and addictive gameplay, where you have to run away from the grumpy inspector and his dog, dodge trains and obstacles, collect coins and power-ups, and unlock new characters and boards.


      But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited coins and keys, unlock all the characters and boards, and remove all the ads? Well, that's where Subway Surfers Hack APK comes in handy.


      Subway Surfers Hack APK is a modified version of the original game that gives you access to all the features and benefits that you normally have to pay for or earn through hard work. With this hack, you can have more fun and excitement in playing Subway Surfers, without worrying about running out of resources or getting caught by the inspector.


      But how do you download and install Subway Surfers Hack APK on your device? What are the advantages and disadvantages of using this hack? And are there any alternatives to it? In this article, we will answer all these questions and more, so keep reading to find out everything you need to know about Subway Surfers Hack APK download 1.80.1.


      How to Download and Install Subway Surfers Hack APK 1.80.1


      If you want to try out Subway Surfers Hack APK on your device, you will need to follow these simple steps:

1. Enable unknown sources on your device. Since Subway Surfers Hack APK is not available on the official Google Play Store or App Store, you will need to allow your device to install apps from unknown sources. To do this, go to your device settings, security, and enable unknown sources.
2. Download the APK file from a trusted source. There are many websites that claim to offer Subway Surfers Hack APK for free download, but not all of them are safe or reliable. Some of them may contain malware or viruses that can harm your device or steal your personal information. Therefore, you should only download the APK file from a trusted source, such as Subway Surfers Hack APK Download, where you can find the latest version of the hack with no surveys or passwords required. (A quick way to check the file isn't corrupted is shown in the sketch after this list.)
3. Install the APK file and launch the game. Once you have downloaded the APK file, locate it in your device storage and tap on it to install it. Follow the instructions on the screen and wait for the installation to complete. Then, launch the game and enjoy Subway Surfers Hack APK 1.80.1.
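
Because an APK file is just a ZIP archive, you can at least confirm that a download is not truncated or corrupted before sideloading it. A small sketch follows; the file name is a placeholder, and this check is no substitute for a proper antivirus scan.

```python
import zipfile

apk_path = "subway-surfers-hack-1.80.1.apk"  # placeholder file name

try:
    with zipfile.ZipFile(apk_path) as apk:
        corrupt = apk.testzip()  # returns the first unreadable member, or None
        has_manifest = "AndroidManifest.xml" in apk.namelist()
    if corrupt is None and has_manifest:
        print("Archive is readable and contains AndroidManifest.xml.")
    else:
        print("Problem found:", corrupt or "no AndroidManifest.xml in archive")
except zipfile.BadZipFile:
    print("Not a valid APK/ZIP file - the download is likely corrupted.")
```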

      What are the Features and Benefits of Subway Surfers Hack APK 1.80.1


      Subway Surfers Hack APK 1.80.1 is not just a regular version of the game with some minor tweaks. It is a fully loaded hack that offers you a lot of features and benefits that you won't find in the original game. Here are some of them:

• Unlimited coins and keys. Coins and keys are the main currencies in Subway Surfers, and you need them to buy new characters, boards, outfits, upgrades, and more. Normally, you have to collect them by playing the game, watching ads, or spending real money. But with Subway Surfers Hack APK 1.80.1, you will have unlimited coins and keys at your disposal, so you can buy anything you want without any hassle.
• All characters and boards unlocked. Subway Surfers has a lot of characters and boards to choose from, each with their own unique abilities and styles. However, most of them are locked and require you to complete certain tasks or pay a certain amount of coins or keys to unlock them. But with Subway Surfers Hack APK 1.80.1, you will have access to all the characters and boards from the start, so you can play with your favorite ones without any restrictions.
• No ads and no root required. Ads can be annoying and distracting when you are playing Subway Surfers, especially when they pop up in the middle of your run or when you want to claim a reward. But with Subway Surfers Hack APK 1.80.1, you will not see any ads in the game, so you can enjoy a smooth and uninterrupted gaming experience. Moreover, you don't need to root your device to use this hack, which means you don't have to risk voiding your warranty or exposing your device to security threats.

      What are the Risks and Drawbacks of Subway Surfers Hack APK 1.80.1


      Subway Surfers Hack APK 1.80.1 may sound like a dream come true for many Subway Surfers fans, but it is not without its risks and drawbacks. Here are some of them:

• Possible malware or virus infection. As mentioned earlier, not all sources that offer Subway Surfers Hack APK for free download are safe or reliable. Some of them may contain malicious software or viruses that can infect your device or steal your personal information. Therefore, you should always be careful when downloading any APK file from unknown sources, and scan it with a reputable antivirus program before installing it.
• Potential ban from the official game server. Subway Surfers Hack APK is an unofficial and unauthorized modification of the original game, which means it violates the terms and conditions of the game developer and publisher. As such, using this hack may result in your account being banned from the official game server, which means you will not be able to play online with other players or access any updates or new features that may be added to the game in the future.
• Legal and ethical issues. Subway Surfers Hack APK is also a form of cheating, which means it gives you an unfair advantage over other players who play the game legitimately. This may ruin the fun and challenge of the game for yourself and others, and also disrespect the hard work and creativity of the game developers who created Subway Surfers for your enjoyment. Furthermore, using this hack may also infringe on the intellectual property rights of the game developer and publisher, which may lead to legal consequences.

      Alternatives to Subway Surfers Hack APK 1.80.1


      If you are not comfortable with using Subway Surfers Hack APK 1.80.1 for any reason, or if you want to try something different, there are some alternatives that you can consider:

• Use legit cheats and tips. There are some legit ways to get more coins and keys in Subway Surfers without using any hacks or mods. For example, you can watch ads, complete missions, participate in events, join a crew, connect to social media accounts, or use promo codes to get free rewards. You can also use some tips and tricks to improve your skills and performance in the game, such as choosing the right character and board combination, using power-ups wisely, avoiding collisions, collecting magnets and jetpacks, etc.
• Play other similar games. Subway Surfers is not the only endless running game that you can play on your device. There are many other similar games that offer the same or even better gameplay, graphics, and features. Some of them are Temple Run, Jetpack Joyride, Minion Rush, Sonic Dash, and Talking Tom Gold Run. You can download and play these games from the official Google Play Store or App Store, and enjoy them without any hacks or mods.

      Conclusion and FAQs


      Subway Surfers Hack APK 1.80.1 is a modified version of the original game that gives you unlimited coins and keys, all characters and boards unlocked, no ads, and no root required. It is a great way to have more fun and excitement in playing Subway Surfers, without any limitations or restrictions.


      However, Subway Surfers Hack APK 1.80.1 also comes with some risks and drawbacks, such as possible malware or virus infection, potential ban from the official game server, legal and ethical issues, and loss of interest and challenge in the game. Therefore, you should always be careful when downloading and installing this hack on your device, and use it at your own risk and responsibility.


      If you are not comfortable with using Subway Surfers Hack APK 1.80.1, or if you want to try something different, you can also use some legit cheats and tips to get more coins and keys in the game, or play other similar games that offer the same or even better gameplay, graphics, and features.


      We hope this article has helped you learn everything you need to know about Subway Surfers Hack APK download 1.80.1. If you have any questions or comments, feel free to leave them below. Here are some FAQs that may also help you:


      FAQ 1: Is Subway Surfers Hack APK 1.80.1 safe to use?


Subway Surfers Hack APK 1.80.1 is not an official or authorized version of the game, which means it may contain malware or viruses that can harm your device or steal your personal information. Therefore, you should always download it from a trusted source, such as Subway Surfers Hack APK Download, where you can find the latest version of the hack with no surveys or passwords required. You should also scan it with a reputable antivirus program before installing it on your device.


      FAQ 2: How can I update Subway Surfers Hack APK 1.80.1?


Subway Surfers Hack APK 1.80.1 is not compatible with the official updates of the game, which means you will not be able to access any new features or content that may be added to the game in the future. If you want to update Subway Surfers Hack APK 1.80.1, you will need to download and install the latest version of the hack from a trusted source, such as Subway Surfers Hack APK Download, where you can find the most recent version of the hack with no surveys or passwords required.


      FAQ 3: Can I play Subway Surfers Hack APK 1.80.1 online with other players?


      Subway Surfers Hack APK 1.80.1 is an offline version of the game, which means you can play it without an internet connection. However, this also means you will not be able to play online with other players or access any online features of the game, such as leaderboards, events, crews, etc.


      FAQ 4: What is the difference between Subway Surfers Hack APK 1.80.1 and Subway Surfers Mod APK?


      Subway Surfers Hack APK 1.80.1 and Subway Surfers Mod APK are both modified versions of the original game that offer unlimited coins and keys, all characters and boards unlocked, no ads, and no root required. However, Subway Surfers Hack APK 1.80.1 is a standalone version of the game that does not require you to install the original game on your device, while Subway Surfers Mod APK is a patch that requires you to install the original game on your device first.


      FAQ 5: Where can I find more information about Subway Surfers Hack APK 1.80.1?


If you want to find more information about Subway Surfers Hack APK 1.80.1, such as how it works, what it offers, how to use it, etc., you can visit Subway Surfers Hack APK Download, where you can find detailed guides and tutorials on how to download and install this hack on your device.

\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy the Fast and Furious with CarX Highway Racing MOD APK for Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy the Fast and Furious with CarX Highway Racing MOD APK for Android.md
deleted file mode 100644
index 3d30c8af7c16dd918b130e15cb55d1a259ceef89..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Enjoy the Fast and Furious with CarX Highway Racing MOD APK for Android.md
+++ /dev/null
@@ -1,14 +0,0 @@

      CarX Highway Racing Mod APK for Android: A Thrilling Racing Game with Unlimited Money

If you are a fan of racing games, you might have heard of CarX Highway Racing, a realistic and exciting game that lets you drive fast cars on busy highways. But did you know that you can also enjoy this game with unlimited money and unlocked features? In this article, we will tell you everything you need to know about CarX Highway Racing Mod APK, a modified version of the original game that gives you more fun and freedom.

      What is CarX Highway Racing?

CarX Highway Racing is a racing game developed by CarX Technologies, the same company that created the popular CarX Drift Racing series. In this game, you can choose from over 50 different cars, each with its own characteristics and performance. You can also customize your cars with various parts and paint jobs.

      Features of CarX Highway Racing

Some of the features that make CarX Highway Racing stand out from other racing games are:

• Realistic physics and graphics that simulate the feel of driving a real car on a real highway.
• Day and night cycles, weather effects, and traffic situations that add variety and challenge to your races.
• Different game modes, such as campaign, time attack, police chase, and online multiplayer, that offer different objectives and rewards.
• A dynamic soundtrack that adapts to your speed and situation.

      How to play CarX Highway Racing

The gameplay of CarX Highway Racing is simple and intuitive. You can control your car using the on-screen buttons or tilt your device. You can also adjust the sensitivity and camera angle in the settings. Your goal is to race against other cars, avoid obstacles, and reach the finish line as fast as possible. You can earn money and reputation points by winning races, completing missions, and performing stunts. You can use the money to buy new cars or upgrade your existing ones. You can use the reputation points to unlock new game modes and locations.

      What is CarX Highway Racing Mod APK?

CarX Highway Racing Mod APK is a modified version of the original game that gives you some extra benefits that are not available in the official version. For example, with CarX Highway Racing Mod APK, you can get unlimited money, which means you can buy any car or part you want without worrying about the cost. You can also get all the cars and locations unlocked from the start, which means you can enjoy the full content of the game without having to grind for reputation points.

      Benefits of CarX Highway Racing Mod APK

Some of the benefits that you can get from using CarX Highway Racing Mod APK are:

• Unlimited money: You can buy any car or part you want without spending real money or waiting for ads.
• Unlocked cars and locations: You can access all the cars and locations in the game without having to unlock them by playing the campaign or earning reputation points.
• No ads: You can enjoy the game without being interrupted by annoying ads.

      How to download and install CarX Highway Racing Mod APK

To download and install CarX Highway Racing Mod APK on your Android device, you need to follow these steps (a quick free-space check is sketched after this list):

• Step 1: Go to and click on the download button to get the latest version of CarX Highway Racing Mod APK.
• Step 2: After downloading the file, go to your device's settings and enable installation from unknown sources. This will allow you to install apps that are not from the Google Play Store.
• Step 3: Locate the downloaded file in your device's file manager and tap on it to start the installation process.
• Step 4: Follow the instructions on the screen and wait for the installation to finish.
• Step 5: Launch the game and enjoy!
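
Before installing a large game, it is worth confirming that the target storage has enough room. A quick sketch using Python's standard library; the path and the size threshold are assumptions you would adjust for your own device.

```python
import shutil

path = "/storage/emulated/0"  # placeholder: the storage location you plan to install to
required_gb = 2               # assumed rough size for the game plus its data

total, used, free = shutil.disk_usage(path)
free_gb = free / (1024 ** 3)
print(f"Free space: {free_gb:.1f} GB")
if free_gb < required_gb:
    print("Not enough space - free up storage before installing.")
```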

      Conclusion

CarX Highway Racing is a thrilling racing game that lets you experience the adrenaline rush of driving fast cars on busy highways. With realistic physics and graphics, different game modes, and a dynamic soundtrack, this game will keep you entertained for hours. However, if you want to have more fun and freedom, you can try CarX Highway Racing Mod APK, a modified version of the game that gives you unlimited money and unlocked features. With CarX Highway Racing Mod APK, you can buy any car or part you want, access all the locations in the game, and play without ads. All you need to do is download and install CarX Highway Racing Mod APK from and enjoy!

      FAQs

Here are some frequently asked questions about CarX Highway Racing Mod APK:

Q: Is CarX Highway Racing Mod APK safe to use?
A: Yes, CarX Highway Racing Mod APK is safe to use as long as you download it from a trusted source like . However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before installing them.

Q: Do I need to root my device to use CarX Highway Racing Mod APK?
A: No, you do not need to root your device to use CarX Highway Racing Mod APK. You can install and play it on any Android device that meets the minimum requirements of the game.

Q: Will I get banned from the game if I use CarX Highway Racing Mod APK?
A: No, you will not get banned from the game if you use CarX Highway Racing Mod APK. However, you should not use it to cheat or abuse the online multiplayer mode, as that might ruin the fun for other players and cause problems for the game developers.

Q: Can I update CarX Highway Racing Mod APK to the latest version of the game?
A: Yes, you can update CarX Highway Racing Mod APK to the latest version of the game by downloading and installing the new version from . However, you should always backup your game data before updating, as some updates might cause compatibility issues or data loss.

Q: Can I play CarX Highway Racing Mod APK offline?
A: Yes, you can play CarX Highway Racing Mod APK offline. However, some features of the game, such as online multiplayer mode, might require an internet connection to work properly.

\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/GTA 5 APK Data for Android The Easiest and Fastest Way to Download GTA V on Mobile.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/GTA 5 APK Data for Android The Easiest and Fastest Way to Download GTA V on Mobile.md
deleted file mode 100644
index be83d577b4efaa376426a3f3830d1e26986ac0ab..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/GTA 5 APK Data for Android The Easiest and Fastest Way to Download GTA V on Mobile.md
+++ /dev/null
@@ -1,102 +0,0 @@

      Download GTA 5 Mobile Android APK Data


      If you are a fan of Grand Theft Auto (GTA) series, you must have played GTA 5 on your PC or console. But did you know that you can also enjoy this amazing game on your android device? Yes, you heard it right. You can download GTA 5 mobile android apk data and play it on your smartphone or tablet. In this article, we will tell you everything you need to know about GTA 5 android apk, how to download and install it, and what features it offers. So, let's get started.


      Introduction


      What is GTA 5?


      GTA 5 is the fifth installment of the popular Grand Theft Auto series, developed by Rockstar Games. It was released in 2013 for PlayStation 3 and Xbox 360, and later for PlayStation 4, Xbox One, and PC. It is one of the best-selling video games of all time, with over 140 million copies sold worldwide. It is also one of the most critically acclaimed games, with an average score of 97 out of 100 on Metacritic.


      Why download GTA 5 mobile android apk data?


      GTA 5 is a game that offers endless entertainment and fun. You can explore the vast and diverse city of Los Santos and its surrounding areas, which are based on real-life locations in Southern California. You can follow the story of three protagonists: Michael, Franklin, and Trevor, who have their own personalities, backgrounds, and motivations. You can switch between them at any time and experience different perspectives and gameplay styles. You can also engage in various activities, such as driving, shooting, fighting, robbing, racing, flying, parachuting, golfing, tennis, yoga, hunting, scuba diving, and more. You can also interact with hundreds of characters and objects in the game world, which are dynamic and realistic.


      However, playing GTA 5 on a PC or console requires a lot of hardware resources and storage space. Not everyone has access to such devices or can afford them. That's why downloading GTA 5 mobile android apk data is a great option for those who want to enjoy this game on their portable devices. You can play GTA 5 anytime and anywhere you want, without compromising on the quality or performance of the game. You can also save your progress online and resume it on any device.


      How to download GTA 5 mobile android apk data


      Requirements for GTA 5 android apk


      Before you download GTA 5 mobile android apk data, you need to make sure that your device meets the minimum requirements for running the game smoothly. Here are the requirements:


• An Android device running Android 4.0 (Ice Cream Sandwich) or higher.
• A minimum of 4 GB of free storage space on your device or SD card.
• A stable internet connection for downloading the game files and playing online.
• A compatible controller or touch screen for controlling the game.
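
If your device is connected to a computer with USB debugging enabled, you can verify the first two requirements from a terminal. This is a minimal sketch using the standard Android adb tool; the exact df output format varies by device.

```bash
adb shell getprop ro.build.version.release   # Android version; needs 4.0 or higher
adb shell df /sdcard                         # free space on shared storage; needs about 4 GB
```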

      Steps to install GTA 5 apk obb files


Once you have checked the requirements, you can follow these simple steps to install the GTA 5 APK and OBB files on your device (a command-line alternative using adb is sketched after the list):

1. Download the GTA 5 APK file from a trusted source. You can use this link to download it safely.
2. Enable the installation of apps from unknown sources in your device settings.
3. Locate the downloaded APK file on your device or SD card and tap on it to install it.
4. Wait for the installation to complete, then launch the game.
5. Download the GTA 5 OBB files from the same source as the APK file. You can use this link to download them safely.
6. Extract the OBB files using a file manager app or a zip extractor app.
7. Copy the extracted folder named com.rockstargames.gtav to the Android/obb folder on your device or SD card.
8. Restart the game and enjoy playing GTA 5 on your Android device.
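
If the device is plugged into a computer, steps 3 and 7 can also be done over adb. The file names below are placeholders for whatever the download actually contains; only the com.rockstargames.gtav folder name comes from the steps above.

```bash
adb install gta5-mobile.apk                            # step 3: install the APK
adb push com.rockstargames.gtav /sdcard/Android/obb/   # step 7: copy the OBB folder
```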

How to play the full GTA 5 on Android


After you have installed the GTA 5 APK and OBB files on your device, you can play the full version of GTA 5 on your Android device. You can access all the features and modes of the game, such as story mode, online mode, the Social Club, and more. You can also adjust the settings of the game according to your preference and device performance. Here are some tips on how to play GTA 5 on Android:

• To switch between the three protagonists, tap on the character icon in the top left corner of the screen.
• To access the map, menu, and inventory, tap on the pause icon in the top right corner of the screen.
• To control the movement of your character or vehicle, use the virtual joystick on the left side of the screen.
• To perform actions, such as shooting, fighting, jumping, and entering or exiting vehicles, use the buttons on the right side of the screen.
• To change the camera angle, swipe on the screen with two fingers.
• To zoom in or out, pinch on the screen with two fingers.

Features of the GTA 5 Android APK


      High-quality graphics and sound


      One of the most impressive features of GTA 5 android apk is its high-quality graphics and sound. The game uses advanced technology and optimization to deliver stunning visuals and realistic effects. You can see every detail of the environment, such as buildings, roads, trees, water, sky, weather, shadows, reflections, and more. You can also hear every sound of the game world, such as voices, music, gunfire, explosions, sirens, horns, and more. The game also supports HD resolution and 60 FPS for a smooth and immersive gaming experience.


      Open-world gameplay and missions


The GTA 5 Android APK offers open-world gameplay that lets you explore and interact with a huge and diverse game world. You can go anywhere you want and do anything you want in the game. You can follow the main story missions or do side missions and activities. You can also create your own adventures and challenges by using the in-game editor. The game has a dynamic and responsive system that changes according to your actions and choices. For example, if you commit a crime, you will attract the attention of the police and have to escape or fight them. If you help someone in need, you will earn their gratitude and respect. The game also has a day-night cycle and a weather system that affect the gameplay and atmosphere.

Online multiplayer mode and Social Club


The GTA 5 Android APK also allows you to play online with other players from around the world. You can join the online multiplayer mode, GTA Online, and create and customize your own character. You can also join or create your own crew and cooperate or compete with other players in various modes and events. You can also access the Social Club, a platform that connects you with other GTA fans and lets you share your achievements, screenshots, videos, and more.


      Customization and modding options


      GTA 5 android apk gives you a lot of options to customize and mod your game according to your preference and taste. You can change the appearance and performance of your character, vehicles, weapons, and more. You can also download and install various mods that add new features, content, or enhancements to the game. For example, you can download mods that add new vehicles, maps, missions, characters, skins, weapons, cheats, and more. You can also use mods that improve the graphics, sound, gameplay, or performance of the game.


      Conclusion


      Summary of the article


In conclusion, GTA 5 is one of the best games ever made, and you can download the GTA 5 mobile Android APK and data files to play it on your Android device. You can enjoy all the features and modes of the game, such as high-quality graphics and sound, open-world gameplay and missions, the online multiplayer mode and Social Club, customization and modding options, and more. You just need to follow the steps we have provided in this article to download and install the GTA 5 APK and OBB files on your device. You can also use the tips we have given to play GTA 5 on Android smoothly and easily.


FAQs about the GTA 5 APK


Here are some frequently asked questions about the GTA 5 APK:

• Is the GTA 5 APK safe to download?
  Yes, the GTA 5 APK is safe to download if you use a trusted source. We have provided a link in this article that you can use to download it safely.
• Is the GTA 5 APK free to download?
  Yes, the GTA 5 APK is free to download. However, you may need to complete some verification steps before downloading the game files.
• Is the GTA 5 APK compatible with all Android devices?
  No, the GTA 5 APK is not compatible with all Android devices. You need an Android device that meets the minimum requirements for running the game smoothly.
• Can I play GTA 5 offline on Android?
  Yes, you can play GTA 5 offline on Android. You only need an internet connection for downloading the game files and playing online.
• Can I transfer my GTA 5 progress from PC or console to Android?
  Yes. You need a Rockstar Games Social Club account linked with your PC or console account. Then, you can log in with your Social Club account on your Android device and sync your progress.

\ No newline at end of file
diff --git a/spaces/skf15963/summary/fengshen/examples/deepVAE/vae_pl_module.py b/spaces/skf15963/summary/fengshen/examples/deepVAE/vae_pl_module.py
deleted file mode 100644
index 15a7ebdf52983f5266cf446b2c9c83c994f7a4f7..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/examples/deepVAE/vae_pl_module.py
+++ /dev/null
@@ -1,278 +0,0 @@
# coding=utf-8
# Copyright 2022 IDEA-CCNL The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Della model. """

import os
import torch
import numpy as np
from fengshen.models.deepVAE.deep_vae import DeepVAE
from pytorch_lightning.core.lightning import LightningModule
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from transformers.models.bert.tokenization_bert import BertTokenizer
from fengshen.models.deepVAE.latent_connector import GPT2ForDecoderLatentConnector, GPT2ForEncoderLatentConnector
from transformers.optimization import AdamW, get_linear_schedule_with_warmup


class DeepVAEModule(LightningModule):
    @classmethod
    def add_module_specific_args(cls, parser):
        group = parser.add_argument_group('vae', 'configurations')
        group.add_argument("--checkpoint_path", type=str, default=None)
        group.add_argument("--gpt2_model_path", type=str)
        group.add_argument("--beta_kl_constraints_start", default=1, type=float,
                           help="min beta for all the latent z posterior vs prior kl loss")
        group.add_argument("--beta_kl_constraints_stop", default=1, type=float,
                           help="max beta for all the latent z posterior vs prior kl loss")
        group.add_argument("--beta_n_cycles", default=30, type=int,
                           help="number of cycles for kl loss ratio within an epoch")
        group.add_argument("--freebit_kl_constraints", default=.1, type=float,
                           help="free bit for all the latent z kl loss")
        group.add_argument("--latent_dim", default=256, type=int,
                           help="latent dimension of deepVAE Z")
        group.add_argument("--learning_rate", default=5e-5, type=float,
                           help="The initial learning rate for Adam.")
        group.add_argument("--weight_decay", default=0.0, type=float,
                           help="Weight decay if we apply some.")
        group.add_argument("--adam_epsilon", default=1e-8, type=float,
                           help="Epsilon for Adam optimizer.")
        group.add_argument("--max_grad_norm", default=1.0, type=float,
                           help="Max gradient norm.")
        group.add_argument("--warmup_steps", default=0, type=int,
                           help="Linear warmup over warmup_steps.")
        group.add_argument("--CVAE", action='store_true',
                           help="specify this argument if finetuning CVAE, otherwise ignore this argument")

        return parser

    @classmethod
    def load_model(cls, args, labels_dict=None):
        checkpoint = torch.load(os.path.join(args.checkpoint_path, 'mp_rank_00_model_states.pt'))

        latent_dim = checkpoint['latent_dim'] if ('latent_dim' in checkpoint.keys()) else args.latent_dim
        labels_dict = checkpoint['label_dict'] if ('label_dict' in checkpoint.keys()) else labels_dict

        enc_config = GPT2Config.from_pretrained(args.gpt2_model_path)
        tokenizer = BertTokenizer.from_pretrained(args.gpt2_model_path)
        special_tokens_dict = {'bos_token': '', 'eos_token': ''}
        # special_tokens_dict = {'bos_token': '', 'eos_token': '', 'additional_special_tokens': ['', '']}
        tokenizer.add_special_tokens(special_tokens_dict)
        encoder_model = GPT2ForEncoderLatentConnector(config=enc_config)
        encoder_model.resize_token_embeddings(len(tokenizer))

        dec_config = GPT2Config.from_pretrained(args.gpt2_model_path)
        decoder_model = GPT2ForDecoderLatentConnector(config=dec_config, latent_dim=latent_dim)
        decoder_model.resize_token_embeddings(len(tokenizer))

        vae_model = DeepVAE(encoder_model, decoder_model, latent_dim=latent_dim,
                            hidden_dim=enc_config.hidden_size, layer_num=enc_config.num_hidden_layers,
                            pad_token_id=tokenizer.pad_token_id, unk_token_id=tokenizer.unk_token_id,
                            bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id,
                            CVAE=args.CVAE)

        # TODO: all the related params should be loaded here! Including latent_nets, posterior_nets, prior_nets, pooling, decoder.transformer.Wv, decoder.transformer.Wz
        anchor = 'module.model.'
        start = len(anchor)
        vae_dict = {key[start:]: val for key, val in checkpoint['module'].items() if anchor in key}
        # comment out if not initialized from VAE
        # if args.CVAE:
        #     # manually load prior and posterior if initialize CVAE model for the first time because of dim mismatch
        #     prior_post_dict = {key: vae_dict.pop(key) for key in list(vae_dict) if ('posterior_nets' in key or 'prior_nets' in key)}
        #     for idx in range(enc_config.num_hidden_layers):
        #         vae_model.posterior_nets[idx].weight.data[:, enc_config.hidden_size:] = prior_post_dict[f"posterior_nets.{idx}.weight"]
        #         vae_model.prior_nets[idx].weight.data[:, enc_config.hidden_size:] = prior_post_dict[f"prior_nets.{idx}.weight"]
        #     enc_wte_shape, dec_wte_shape = vae_dict['encoder.transformer.wte.weight'].shape[0], vae_dict['decoder.transformer.wte.weight'].shape[0]
        #     vae_model.encoder.transformer.wte.weight.data[:enc_wte_shape, :] = vae_dict.pop('encoder.transformer.wte.weight')
        #     vae_model.decoder.transformer.wte.weight.data[:dec_wte_shape, :] = vae_dict.pop('decoder.transformer.wte.weight')
        #     vae_model.decoder.lm_head.weight.data[:dec_wte_shape, :] = vae_dict.pop('decoder.lm_head.weight')
        missing_keys, unexpected_keys = vae_model.load_state_dict(vae_dict, strict=False)
        print(f"Vae model loading process: missing keys {missing_keys}, unexpected keys {unexpected_keys}")

        return vae_model, tokenizer

    def __init__(
            self,
            args,
            train_steps=0,
            labels_dict=None
    ):
        super().__init__()
        # self.save_hyperparameters()
        self.args = args

        if args.checkpoint_path is not None:
            self.model, self.encoder_tokenizer, self.decoder_tokenizer, self.latent_dim, \
                self.labels_dict, self.args = DeepVAEModule.load_model(self.args, labels_dict=labels_dict)
        else:
            self.encoder_tokenizer = BertTokenizer.from_pretrained(self.args.encoder_model_path)
            encoder_config = GPT2Config.from_pretrained(self.args.encoder_model_path)
            special_tokens_dict = {'bos_token': '', 'eos_token': '', 'additional_special_tokens': ['', '']}
            self.encoder_tokenizer.add_special_tokens(special_tokens_dict)
            self.latent_dim = self.args.latent_dim
            encoder = GPT2ForEncoderLatentConnector.from_pretrained(self.args.encoder_model_path, config=encoder_config)
            # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
            encoder.resize_token_embeddings(len(self.encoder_tokenizer))

            self.decoder_tokenizer = BertTokenizer.from_pretrained(self.args.decoder_model_path)
            self.decoder_tokenizer.add_special_tokens(special_tokens_dict)
            decoder_config = GPT2Config.from_pretrained(self.args.decoder_model_path)
            self.labels_dict = labels_dict
            decoder = GPT2ForDecoderLatentConnector.from_pretrained(self.args.decoder_model_path, config=decoder_config,
                                                                    latent_dim=self.latent_dim)

            # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
            decoder.resize_token_embeddings(len(self.decoder_tokenizer))
            self.model = DeepVAE(encoder, decoder, latent_dim=self.args.latent_dim,
                                 hidden_dim=encoder_config.hidden_size, layer_num=encoder_config.num_hidden_layers,
                                 pad_token_id=self.decoder_tokenizer.pad_token_id, unk_token_id=self.decoder_tokenizer.unk_token_id,
                                 bos_token_id=self.decoder_tokenizer.bos_token_id, eos_token_id=self.decoder_tokenizer.eos_token_id,
                                 CVAE=args.CVAE)

        self.train_steps = train_steps
        # TODO: adjust the cyclic schedule
        self.beta_kl_constraints_list = self.get_cyclic_linear_beta_list(self.train_steps,
                                                                         start=args.beta_kl_constraints_start, stop=args.beta_kl_constraints_stop, n_cycle=args.beta_n_cycles)
        # self.mlm_probability_list = self.get_decoder_beta_list(self.train_steps,
        #                                                        start=0., stop=1., n_cycle=args.beta_n_cycles)
        # self.beta_kl_constraints_list = self.get_constant_ratio(self.train_steps, args.beta_kl_constraints)
        self.mlm_probability_list = self.get_constant_ratio(self.train_steps, 0.)
        # self.freebit_kl_constraints = args.freebit_kl_constraints

    def get_constant_ratio(self, n_steps, ratio):
        L = np.ones(n_steps)
        L *= ratio
        return L

    def get_decoder_beta_list(self, n_steps, start=0., stop=1.0, n_cycle=4):
        L = np.ones(n_steps)
        t_range = int(n_steps / n_cycle)
        for t_cur in range(n_steps):
            if t_cur > t_range:
                L[t_cur] = 0.
            else:
                ratio = t_cur / t_range
                value = stop - ratio * (stop-start)
                L[t_cur] = value
        return L

    def get_cyclic_linear_beta_list(self, n_steps, start=0.5, stop=1.0, n_cycle=4):
        L = np.ones(n_steps)
        t_range = int(n_steps / n_cycle)
        for t_cur in range(n_steps):
            loc = t_cur % t_range
            split_range = int(t_range * 0.25)
            if loc <= 2*split_range:
                value = start
            elif loc <= 3*split_range:
                ratio = (loc % split_range) / split_range
                value = ratio * (stop-start)
            else:
                value = stop
            L[t_cur] = value
        return L

    #####
    # Torch lightning
    #####

    def on_save_checkpoint(self, checkpoint) -> None:
        checkpoint['label_dict'] = self.labels_dict
        checkpoint['latent_dim'] = self.latent_dim

    def training_step(self, batch, batch_idx):
        if batch is None:
            loss = torch.Tensor([0.]).to(next(self.model.parameters()).device)
            loss.requires_grad = True
            return loss
        inputs, cond_inputs = batch, None
        if self.args.CVAE:
            inputs, cond_inputs = batch

        total_loss, rec_loss, total_kl_loss, layer_kl_loss = \
            self.model(inputs, self.beta_kl_constraints_list[batch_idx], cond_inputs)
        # the logging interval are set by the trainer_args log_every_n_steps
        for idx, pg in enumerate(self.optimizers().param_groups):
            self.log(f"learning_rate_{idx}", pg['lr'])
        unscaled_kl_constraint_loss = 0. if self.beta_kl_constraints_list[batch_idx] == 0. else total_kl_loss/self.beta_kl_constraints_list[batch_idx]
        self.log("total_loss", total_loss)
        self.log("total_kl_constraint_loss", total_kl_loss)
        self.log("unscaled_kl_constraint_loss", unscaled_kl_constraint_loss)
        self.log("beta_kl_constraints", self.beta_kl_constraints_list[batch_idx])
        self.log("beta_mlm_probability", self.mlm_probability_list[batch_idx])
        self.log("rec_loss", rec_loss)
        for idx, kl_loss in enumerate(layer_kl_loss):
            self.log(f"layer_{idx}_kl_loss", kl_loss.mean())

        return total_loss

    def training_step_end(self, batch_parts):
        pass

    def training_epoch_end(self, outputs):
        pass

    def validation_step(self, batch, batch_idx):
        if batch is None:
            loss = torch.Tensor([0.]).to(next(self.model.parameters()).device)
            loss.requires_grad = True
            return loss
        inputs, cond_inputs = batch, None
        if self.args.CVAE:
            inputs, cond_inputs = batch

        total_loss, rec_loss, total_kl_loss, layer_kl_loss = self.model(inputs, 1., cond_inputs)
        # the logging interval are set by the trainer_args log_every_n_steps
        self.log("val_total_loss", total_loss)
        self.log("val_kl_constraint_loss", total_kl_loss)
        self.log("val_recon_loss", rec_loss)
        for idx, kl_loss in enumerate(layer_kl_loss):
            self.log(f"layer_{idx}_kl_loss", kl_loss.mean())
        return total_loss

    def validation_epoch_end(self, outputs):
        pass

    def test_step(self, batch, batch_idx):
        if batch is None:
            loss = torch.Tensor([0.]).to(next(self.model.parameters()).device)
            loss.requires_grad = True
            return loss
        inputs, cond_inputs = batch, None
        if self.args.CVAE:
            inputs, cond_inputs = batch
        total_loss, rec_loss, total_kl_loss, layer_kl_loss = self.model(inputs, 1., cond_inputs)
        self.log("test_total_loss", total_loss)
        self.log("test_recon_loss", rec_loss)
        self.log("test_kl_constraint_loss", total_kl_loss)
        for idx, kl_loss in enumerate(layer_kl_loss):
            self.log(f"layer_{idx}_kl_loss", kl_loss.mean())
        return total_loss

    def configure_optimizers(self):
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': self.args.weight_decay},
            {'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]

        optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=self.train_steps)

        return {'optimizer': optimizer,
                'lr_scheduler': {
                    'scheduler': scheduler,
                    'interval': 'step',
                    'frequency': 1
                }
                }
diff --git a/spaces/skf15963/summary/fengshen/examples/hubert/pretrain_hubert_base.sh b/spaces/skf15963/summary/fengshen/examples/hubert/pretrain_hubert_base.sh
deleted file mode 100644
index 11e5ddf38361d51910c35b02f10b7e285ab3f0fb..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/examples/hubert/pretrain_hubert_base.sh
+++ /dev/null
@@ -1,120 +0,0 @@
#!/bin/bash
#SBATCH --job-name=pretrain_bart # create a short name for your job
#SBATCH --nodes=1 # node count
#SBATCH --ntasks-per-node=8 # number of tasks to run per node
#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks)
#SBATCH --gres=gpu:8 # number of gpus per node
#SBATCH -o %x-%j.log # output and error log file names (%x for job id)
#SBATCH -x dgx050

MODEL_NAME=hubert-base-ls960
config_json="./$MODEL_NAME.ds_config.json"
export MASTER_PORT=29503
MICRO_BATCH_SIZE=8
ZERO_STAGE=1

# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
cat <<EOT > $config_json
{
    "zero_optimization": {
        "stage": ${ZERO_STAGE}
    },
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "tensorboard": {
        "enabled": true,
        "output_path": "/data/training_model/fengshen-${MODEL_NAME}/ds-tb-logs",
        "job_name": "${MODEL_NAME}"
    },
    "#flops_profiler": {
        "enabled": true,
        "profile_step": 200,
        "detailed": true,
        "output_file": null
    },
    "steps_per_print": 100,
    "gradient_clipping": 1,
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "zero_allow_untested_optimizer": false
}
EOT

export PL_DEEPSPEED_CONFIG_PATH=$config_json
export TORCH_EXTENSIONS_DIR=/home/gaoxinyu/torch_extendsions

DATA_DIR=/data/common_data/librispeech_tsv/datas
LABELS_DIR=/data/common_data/librispeech_tsv/labels

DATA_ARGS="\
        --dataloader_workers 2 \
        --train_batchsize $MICRO_BATCH_SIZE \
        --val_batchsize 32 \
        --test_batchsize 8 \
        --val_datasets_field valid \
        --test_datasets_field valid \
        --sampler_type random \
        --data ${DATA_DIR} \
        --label_dir ${LABELS_DIR} \
        --labels km \
        --label_rate 100 \
        --max_sample_size 250000 \
        --min_sample_size 32000 \
        --pad_audio False \
        --random_crop True \
        --normalize False \
        "

MODEL_ARGS="\
        --model_path /data/pretrained_model/$MODEL_NAME/ \
        --learning_rate 1e-4 \
        --weight_decay 1e-2 \
        --warmup_ratio 0.01 \
        --pred_masked_weight 1.0 \
        --loss_weights 10 \
        "

MODEL_CHECKPOINT_ARGS="\
        --monitor train_loss \
        --save_top_k 0 \
        --mode min \
        --every_n_train_steps 10000 \
        --dirpath /data/training_model/ckpt/fengshen-$MODEL_NAME \
        --filename model-{step:02d}-{train_loss:.4f} \
        --every_n_epochs 0 \
        --save_last \
        --not_save_on_train_epoch_end \
        "

# deepspeed_stage_${ZERO_STAGE} \
TRAINER_ARGS="\
        --gradient_clip_val 1.0 \
        --max_epochs 10 \
        --gpus 2 \
        --num_nodes 1 \
        --strategy deepspeed_stage_${ZERO_STAGE} \
        --log_every_n_steps 100 \
        --val_check_interval 500 \
        --limit_val_batches 10 \
        --accumulate_grad_batches 1 \
        --precision 16 \
        --ckpt_path /data/training_model/ckpt/fengshen-${MODEL_NAME}/last.ckpt \
        --default_root_dir /data/training_model/fengshen-$MODEL_NAME \
        "


export options=" \
        $DATA_ARGS \
        $MODEL_ARGS \
        $MODEL_CHECKPOINT_ARGS \
        $TRAINER_ARGS \
        "

export SCRIPT_PATH=pretrain_hubert.py

eval python3 -m debugpy --listen localhost:53005 --wait-for-client $SCRIPT_PATH $options
diff --git a/spaces/souljoy/ChatPDF/README.md b/spaces/souljoy/ChatPDF/README.md
deleted file mode 100644
index f7d6b2fa3b90cd7bbf5a3958569e5a68cf84865a..0000000000000000000000000000000000000000
--- a/spaces/souljoy/ChatPDF/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
---
title: ChatPDF
emoji: 💻
colorFrom: gray
colorTo: yellow
sdk: gradio
sdk_version: 3.20.0
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/sqc1729/bingi/src/lib/bots/bing/tts.ts b/spaces/sqc1729/bingi/src/lib/bots/bing/tts.ts
deleted file mode 100644
index cd10b7d1d7581bf9cf46ff6755fcca550c558c9b..0000000000000000000000000000000000000000
--- a/spaces/sqc1729/bingi/src/lib/bots/bing/tts.ts
+++ /dev/null
@@ -1,82 +0,0 @@
import { sleep } from './utils'

const synth = window.speechSynthesis

export class TTS {
  currentText = ''
  speakText = ''
  private controller = new AbortController()
  speaking = false
  get isSpeaking() {
    return this.speaking
  }
  finished = false
  constructor() {}
  abort = () => {
    this.controller.abort()
  }

  reset = () => {
    this.speaking = false
    this.finished = true
    this.currentText = ''
    this.speakText = ''
    this.abort()
  }

  speak = (text: string) => {
    if (!synth || text?.trim()?.length < 2) {
      return
    }
    this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '')
    this.finished = false
    this.loop()
  }

  private async doSpeek() {
    return new Promise((resolve) => {
      const endIndex = this.finished ? this.currentText.length :
        Math.max(
          this.currentText.lastIndexOf('。'),
          this.currentText.lastIndexOf(';'),
          this.currentText.lastIndexOf('、'),
          this.currentText.lastIndexOf('?'),
          this.currentText.lastIndexOf('\n')
        )
      const startIndex = this.speakText.length ? Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0

      if (startIndex >= endIndex) {
        return resolve(true)
      }
      const text = this.currentText.slice(startIndex, endIndex)
      this.speakText = text
      const utterThis = new SpeechSynthesisUtterance(text)
      this.controller.signal.onabort = () => {
        synth.cancel()
        this.finished = true
        resolve(false)
      }

      utterThis.onend = function (event) {
        resolve(true)
      }

      utterThis.onerror = function (event) {
        resolve(false)
      }

      const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null
      utterThis.voice = voice
      synth.speak(utterThis)
    })
  }

  private async loop() {
    if (this.speaking) return
    this.speaking = true
    while(!this.finished) {
      await Promise.all([sleep(1000), this.doSpeek()])
    }
    this.speaking = false
  }
}
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py
deleted file mode 100644
index d6ee9c4a3602be9db8ddfe67d41ce8a96a98ad1e..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py
+++ /dev/null
@@ -1,144 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


import logging
import os
import contextlib

import numpy as np
import torch

from fairseq.data import FairseqDataset, data_utils


logger = logging.getLogger(__name__)


class ExtractedFeaturesDataset(FairseqDataset):
    def __init__(
        self,
        path,
        split,
        min_length=3,
        max_length=None,
        labels=None,
        label_dict=None,
        shuffle=True,
        sort_by_length=True,
    ):
        super().__init__()

        self.min_length = min_length
        self.max_length = max_length
        self.shuffle = shuffle
        self.sort_by_length = sort_by_length
        self.label_dict = label_dict

        if labels is not None:
            assert label_dict is not None

        self.sizes = []
        self.offsets = []
        self.labels = []

        path = os.path.join(path, split)
        data_path = path
        self.data = np.load(data_path + ".npy", mmap_mode="r")

        offset = 0
        skipped = 0

        if not os.path.exists(path + f".{labels}"):
            labels = None

        with open(data_path + ".lengths", "r") as len_f, open(
            path + f".{labels}", "r"
        ) if labels is not None else contextlib.ExitStack() as lbl_f:
            for line in len_f:
                length = int(line.rstrip())
                lbl = None if labels is None else next(lbl_f).rstrip().split()
                if length >= min_length and (
                    max_length is None or length <= max_length
                ):
                    self.sizes.append(length)
                    self.offsets.append(offset)
                    if lbl is not None:
                        self.labels.append(lbl)
                offset += length

        self.sizes = np.asarray(self.sizes)
        self.offsets = np.asarray(self.offsets)

        logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")

    def __getitem__(self, index):
        offset = self.offsets[index]
        end = self.sizes[index] + offset
        feats = torch.from_numpy(self.data[offset:end].copy()).float()

        res = {"id": index, "features": feats}
        if len(self.labels) > 0:
            res["target"] = self.label_dict.encode_line(
                self.labels[index],
                line_tokenizer=lambda x: x,
                append_eos=False,
            )

        return res

    def __len__(self):
        return len(self.sizes)

    def collater(self, samples):
        if len(samples) == 0:
            return {}

        features = [s["features"] for s in samples]
        sizes = [len(s) for s in features]

        target_size = max(sizes)

        collated_features = features[0].new_zeros(
            len(features), target_size, features[0].size(-1)
        )
        padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
        for i, (f, size) in enumerate(zip(features, sizes)):
            collated_features[i, :size] = f
            padding_mask[i, size:] = True

        res = {
            "id": torch.LongTensor([s["id"] for s in samples]),
            "net_input": {"features": collated_features, "padding_mask": padding_mask},
        }

        if len(self.labels) > 0:
            target = data_utils.collate_tokens(
                [s["target"] for s in samples],
                pad_idx=self.label_dict.pad(),
                left_pad=False,
            )
            res["target"] = target
        return res

    def num_tokens(self, index):
        return self.size(index)

    def size(self, index):
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]

        if self.sort_by_length:
            order.append(self.sizes)
            return np.lexsort(order)[::-1]
        else:
            return order[0]
diff --git a/spaces/starlit7/USPoliticsTTS/modules.py b/spaces/starlit7/USPoliticsTTS/modules.py
deleted file mode 100644
index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000
--- a/spaces/starlit7/USPoliticsTTS/modules.py
+++ /dev/null
@@ -1,390 +0,0 @@
import copy
import math
import numpy as np
import scipy
import torch
from torch import nn
from torch.nn import functional as F

from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm

import commons
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform


LRELU_SLOPE = 0.1


class LayerNorm(nn.Module):
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.channels = channels
        self.eps = eps

        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        x = x.transpose(1, -1)
        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)


class ConvReluNorm(nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 0."

        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = nn.Sequential(
            nn.ReLU(),
            nn.Dropout(p_dropout))
        for _ in range(n_layers-1):
            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
            self.norm_layers.append(LayerNorm(hidden_channels))
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        x_org = x
        for i in range(self.n_layers):
            x = self.conv_layers[i](x * x_mask)
            x = self.norm_layers[i](x)
            x = self.relu_drop(x)
        x = x_org + self.proj(x)
        return x * x_mask


class DDSConv(nn.Module):
    """
    Dilated and Depth-Separable Convolution
    """
    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
        super().__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout

        self.drop = nn.Dropout(p_dropout)
        self.convs_sep = nn.ModuleList()
        self.convs_1x1 = nn.ModuleList()
        self.norms_1 = nn.ModuleList()
        self.norms_2 = nn.ModuleList()
        for i in range(n_layers):
            dilation = kernel_size ** i
            padding = (kernel_size * dilation - dilation) // 2
            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
                                            groups=channels, dilation=dilation, padding=padding
                                            ))
            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
            self.norms_1.append(LayerNorm(channels))
            self.norms_2.append(LayerNorm(channels))

    def forward(self, x, x_mask, g=None):
        if g is not None:
            x = x + g
        for i in range(self.n_layers):
            y = self.convs_sep[i](x * x_mask)
            y = self.norms_1[i](y)
            y = F.gelu(y)
            y = self.convs_1x1[i](y)
            y = self.norms_2[i](y)
            y = F.gelu(y)
            y = self.drop(y)
            x = x + y
        return x * x_mask


class WN(torch.nn.Module):
    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
        super(WN, self).__init__()
        assert(kernel_size % 2 == 1)
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels
        self.p_dropout = p_dropout

        self.in_layers = torch.nn.ModuleList()
        self.res_skip_layers = torch.nn.ModuleList()
        self.drop = nn.Dropout(p_dropout)

        if gin_channels != 0:
            cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')

        for i in range(n_layers):
            dilation = dilation_rate ** i
            padding = int((kernel_size * dilation - dilation) / 2)
            in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
                                       dilation=dilation, padding=padding)
            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
            self.in_layers.append(in_layer)

            # last one is not necessary
            if i < n_layers - 1:
                res_skip_channels = 2 * hidden_channels
            else:
                res_skip_channels = hidden_channels

            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            self.res_skip_layers.append(res_skip_layer)

    def forward(self, x, x_mask, g=None, **kwargs):
        output = torch.zeros_like(x)
        n_channels_tensor = torch.IntTensor([self.hidden_channels])

        if g is not None:
            g = self.cond_layer(g)

        for i in range(self.n_layers):
            x_in = self.in_layers[i](x)
            if g is not None:
                cond_offset = i * 2 * self.hidden_channels
                g_l = g[:, cond_offset:cond_offset+2*self.hidden_channels, :]
            else:
                g_l = torch.zeros_like(x_in)

            acts = commons.fused_add_tanh_sigmoid_multiply(
                x_in,
                g_l,
                n_channels_tensor)
            acts = self.drop(acts)

            res_skip_acts = self.res_skip_layers[i](acts)
            if i < self.n_layers - 1:
                res_acts = res_skip_acts[:, :self.hidden_channels, :]
                x = (x + res_acts) * x_mask
                output = output + res_skip_acts[:, self.hidden_channels:, :]
            else:
                output = output + res_skip_acts
        return output * x_mask

    def remove_weight_norm(self):
        if self.gin_channels != 0:
            torch.nn.utils.remove_weight_norm(self.cond_layer)
        for l in self.in_layers:
            torch.nn.utils.remove_weight_norm(l)
        for l in self.res_skip_layers:
            torch.nn.utils.remove_weight_norm(l)


class ResBlock1(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock1, self).__init__()
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
                               padding=get_padding(kernel_size, dilation[2])))
        ])
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
                               padding=get_padding(kernel_size, 1)))
        ])
        self.convs2.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c1, c2 in zip(self.convs1, self.convs2):
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c1(xt)
            xt = F.leaky_relu(xt, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c2(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class ResBlock2(torch.nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
        super(ResBlock2, self).__init__()
        self.convs = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
                               padding=get_padding(kernel_size, dilation[0]))),
            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
                               padding=get_padding(kernel_size, dilation[1])))
        ])
        self.convs.apply(init_weights)

    def forward(self, x, x_mask=None):
        for c in self.convs:
            xt = F.leaky_relu(x, LRELU_SLOPE)
            if x_mask is not None:
                xt = xt * x_mask
            xt = c(xt)
            x = xt + x
        if x_mask is not None:
            x = x * x_mask
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class Log(nn.Module):
    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
            logdet = torch.sum(-y, [1, 2])
            return y, logdet
        else:
            x = torch.exp(x) * x_mask
            return x


class Flip(nn.Module):
    def forward(self, x, *args, reverse=False, **kwargs):
        x = torch.flip(x, [1])
        if not reverse:
            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
            return x, logdet
        else:
            return x


class ElementwiseAffine(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        self.m = nn.Parameter(torch.zeros(channels, 1))
        self.logs = nn.Parameter(torch.zeros(channels, 1))

    def forward(self, x, x_mask, reverse=False, **kwargs):
        if not reverse:
            y = self.m + torch.exp(self.logs) * x
            y = y * x_mask
            logdet = torch.sum(self.logs * x_mask, [1, 2])
            return y, logdet
        else:
            x = (x - self.m) * torch.exp(-self.logs) * x_mask
            return x


class ResidualCouplingLayer(nn.Module):
    def __init__(self,
                 channels,
                 hidden_channels,
                 kernel_size,
                 dilation_rate,
                 n_layers,
                 p_dropout=0,
                 gin_channels=0,
                 mean_only=False):
        assert channels % 2 == 0, "channels should be divisible by 2"
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.half_channels = channels // 2
        self.mean_only = mean_only

        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
        self.post.weight.data.zero_()
        self.post.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0) * x_mask
        h = self.enc(h, x_mask, g=g)
        stats = self.post(h) * x_mask
        if not self.mean_only:
            m, logs = torch.split(stats, [self.half_channels]*2, 1)
        else:
            m = stats
            logs = torch.zeros_like(m)

        if not reverse:
            x1 = m + x1 * torch.exp(logs) * x_mask
            x = torch.cat([x0, x1], 1)
            logdet = torch.sum(logs, [1, 2])
            return x, logdet
        else:
            x1 = (x1 - m) * torch.exp(-logs) * x_mask
            x = torch.cat([x0, x1], 1)
            return x


class ConvFlow(nn.Module):
    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.num_bins = num_bins
        self.tail_bound = tail_bound
        self.half_channels = in_channels // 2

        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask, g=None, reverse=False):
        x0, x1 = torch.split(x, [self.half_channels]*2, 1)
        h = self.pre(x0)
        h = self.convs(h, x_mask, g=g)
        h = self.proj(h) * x_mask

        b, c, t = x0.shape
        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, cx?, t] -> [b, c, t, ?]

        unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
        unnormalized_derivatives = h[..., 2 * self.num_bins:]

        x1, logabsdet = piecewise_rational_quadratic_transform(x1,
                                                               unnormalized_widths,
                                                               unnormalized_heights,
                                                               unnormalized_derivatives,
                                                               inverse=reverse,
                                                               tails='linear',
                                                               tail_bound=self.tail_bound
                                                               )

        x = torch.cat([x0, x1], 1) * x_mask
        logdet = torch.sum(logabsdet * x_mask, [1, 2])
        if not reverse:
            return x, logdet
        else:
            return x
diff --git a/spaces/stomexserde/gpt4-ui/Examples/HOT Full AutoCAD LT For Mac 2017 Key.md b/spaces/stomexserde/gpt4-ui/Examples/HOT Full AutoCAD LT For Mac 2017 Key.md
deleted file mode 100644
index 9ff8d6d58a83c3344cb649f585431185b3c5627d..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/HOT Full AutoCAD LT For Mac 2017 Key.md
+++ /dev/null
@@ -1,42 +0,0 @@

      FULL AutoCAD LT For Mac 2017 Key: How to Get It and Why You Need It


If you are looking for professional and reliable software for 2D drafting and design, you might want to consider the FULL AutoCAD LT For Mac 2017 Key. This is a product key that allows you to activate and use the full version of AutoCAD LT for Mac 2017, software that is specially designed for Mac users who need to create precise and detailed drawings.


      In this article, we will explain what FULL AutoCAD LT For Mac 2017 Key is, how to get it, and why you need it. We will also show you some of the features and benefits of using AutoCAD LT for Mac 2017, and how it can help you improve your productivity and quality of work.


      What is FULL AutoCAD LT For Mac 2017 Key?

      - -

      FULL AutoCAD LT For Mac 2017 Key is a product key that you can purchase online from authorized resellers or directly from Autodesk, the developer of AutoCAD LT. A product key is a unique code that verifies that you have a legitimate copy of the software and that you are entitled to use it.


      When you buy FULL AutoCAD LT For Mac 2017 Key, you will receive an email with the product key and a link to download the software installer. You will need to enter the product key during the installation process to activate the software. Once activated, you can use the full version of AutoCAD LT for Mac 2017 without any limitations or restrictions.


      How to Get FULL AutoCAD LT For Mac 2017 Key?


      There are two ways to get FULL AutoCAD LT For Mac 2017 Key: either by buying it online or by subscribing to Autodesk's subscription plan.


      If you want to buy FULL AutoCAD LT For Mac 2017 Key online, you can choose from various resellers that offer different prices and payment options. Some of the reputable resellers are:

• CADstore.net: This is a trusted online store that sells various CAD software and product keys. You can buy the FULL AutoCAD LT For Mac 2017 Key for $349.00 with free shipping and instant delivery.
• CAD Software Direct: This is another reliable online store that offers a wide range of CAD software and product keys. You can buy the FULL AutoCAD LT For Mac 2017 Key for $399.00 with free shipping and instant delivery.
• CADPRO Systems: This is a leading online store that specializes in CAD software and product keys. You can buy the FULL AutoCAD LT For Mac 2017 Key for $449.00 with free shipping and instant delivery.

      If you want to subscribe to Autodesk's subscription plan, you can choose from monthly, yearly, or multi-year plans. The subscription plan gives you access to the latest version of AutoCAD LT for Mac, as well as updates, support, and cloud services. The subscription plan costs $50 per month, $400 per year, or $1080 for three years.


Why Do You Need the FULL AutoCAD LT For Mac 2017 Key?


      You might be wondering why you need FULL AutoCAD LT For Mac 2017 Key when you can use the free trial version of AutoCAD LT for Mac 2017. The answer is simple: the free trial version has some limitations that can affect your work and performance.



      Some of the limitations of the free trial version are:

• You can only use it for 30 days.
• You cannot save or print your drawings.
• You cannot export or import your drawings to other formats.
• You cannot customize or

\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Hide ALL IP 2020.01.13 Crack Full.md b/spaces/stomexserde/gpt4-ui/Examples/Hide ALL IP 2020.01.13 Crack Full.md
deleted file mode 100644
index 589d650b96bf2afa510b215fe3741b2f2908bf5a..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Hide ALL IP 2020.01.13 Crack Full.md
+++ /dev/null
@@ -1,139 +0,0 @@

        Hide ALL IP 2020.01.13 Crack Full: How to Surf Anonymously and Protect Your Online Identity


        Do you want to hide your real IP address and surf the internet anonymously? Do you want to prevent identity theft, hacker attacks, and online tracking? Do you want to access internet TV, geo-restricted websites, and games without any restrictions? If you answered yes to any of these questions, then you need Hide ALL IP 2020.01.13 Crack Full.


Hide ALL IP 2020.01.13 Crack Full is the latest version of the world's best IP hide software, which lets you hide the IP address of all your applications and games from snoopers and hackers. You can surf anonymously, encrypt all your data, and enjoy the freedom of the internet with just one click.


        In this article, we will show you what Hide ALL IP is, why you need it, how to download and install it, how to use it, and what benefits it offers. By the end of this article, you will be able to surf the internet with complete anonymity and security.


        What is Hide ALL IP and why do you need it?


Hide ALL IP is software that lets you hide your actual IP address and replace it with a fake one from a different country or region. This way, you can make yourself appear as if you are browsing from somewhere else, and avoid being tracked or traced by anyone.



        There are many reasons why you might want to hide your IP address, such as:

• To protect your online identity and privacy from hackers, snoopers, ISPs, government agencies, advertisers, etc.
• To access internet TV, streaming services, websites, games, and other content that are blocked or restricted in your region due to geo-restrictions or censorship.
• To reduce your game ping and lag by connecting to a server closer to the game server.
• To bypass firewalls, proxies, VPNs, and other network restrictions that might prevent you from accessing certain websites or applications.
• To encrypt all your data and traffic so that no one can intercept or monitor your online activities.

        Hide ALL IP offers several features that make it stand out from other IP hide software, such as:


        Hide ALL IP is the world's best IP hide software


Hide ALL IP is not just an ordinary proxy or VPN service that only hides your browser's IP address. It is a powerful tool that hides the IP address of all your applications and games from snoopers and hackers.


        Hide ALL IP works with any application or game that uses TCP/UDP protocols, such as browsers, instant messengers, video players, games, etc. You can also use it with HTTP tunnel mode to bypass any firewall or proxy.


        Hide ALL IP has a large number of servers in different countries and regions that you can choose from. You can also switch between servers easily with just one click.


        Hide ALL IP protects your online identity and privacy


        Hide ALL IP not only hides your IP address, but also encrypts all your data and traffic with industry-standard AES/DES encryption. This means that no one can see what you are doing online, or steal your personal information, passwords, credit card numbers, etc.


        Hide ALL IP also supports UDP-based applications and games, such as PUBG, Fortnite, League of Legends, etc. Unlike other VPN or proxy services that may drop your connection or slow down your speed, Hide ALL IP provides a fast and stable connection for your gaming needs.


        Hide ALL IP also has a unique feature called "Mobile Hotspot". You can share your fake IP with your mobile devices, such as smartphones, tablets, etc. This way, you can enjoy the same level of anonymity and security on all your devices.


        Hide ALL IP supports almost all applications and games


        Hide ALL IP is compatible with almost all applications and games that use TCP/UDP protocols. You can hide your IP address for any browser, instant messenger, video player, game, etc. You can also use it with HTTP tunnel mode to bypass any firewall or proxy.


        Some of the applications and games that Hide ALL IP supports are:

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        BrowserInstant MessengerVideo PlayerGame
        ChromeSkypeVLCPUBG
        FirefoxWhatsAppKMPlayerFortnite
        EdgeWeChatGOM PlayerLeague of Legends
        SafariTelegramPotPlayerMinecraft
        OperaViberMPC-HCGTA V
        Tor BrowserLineMX PlayerCall of Duty

        And many more. You can check the full list of supported applications and games on the official website of Hide ALL IP.


        How to download and install Hide ALL IP 2020.01.13 Crack Full?


        If you want to use Hide ALL IP 2020.01.13 Crack Full, you need to download and install it on your computer. Here are the steps to do so:


        Download Hide ALL IP 2020.01.13 from a reliable source


        The first step is to download Hide ALL IP 2020.01.13 from a reliable source. You can find the download link on the official website of Hide ALL IP, or on other trusted websites that offer software downloads.


        Make sure that you download the latest version of Hide ALL IP, which is 2020.01.13, as it has the most updated features and bug fixes.


        Also, make sure that you download the crack file along with the setup file, as it will help you activate the full version of Hide ALL IP without paying any fees.


        Disable Windows Defender and antivirus software


        The next step is to disable Windows Defender and any antivirus software that you have on your computer. This is because these programs might interfere with the installation process of Hide ALL IP, or detect the crack file as a virus or malware.


        To disable Windows Defender, go to Settings > Update & Security > Windows Security > Virus & threat protection > Manage settings, and turn off the Real-time protection toggle.


        To disable your antivirus software, go to its settings and look for an option to turn it off temporarily or permanently.


        Remember to enable Windows Defender and your antivirus software again after you finish installing Hide ALL IP.


        Extract the zip file and run the setup file


        The final step is to extract the zip file that contains the setup file and the crack file of Hide ALL IP. You can use any software that can extract zip files, such as WinRAR, 7-Zip, etc.


        After extracting the zip file, open the folder and run the setup file as an administrator. Follow the instructions on the screen to install Hide ALL IP on your computer.


        After installing Hide ALL IP, do not run it yet. Instead, copy the crack file and paste it into the installation directory of Hide ALL IP. This will overwrite the original file and activate the full version of Hide ALL IP.


        Now you can run Hide ALL IP and enjoy its features.


        How to use Hide ALL IP 2020.01.13 Crack Full?


Using Hide ALL IP 2020.01.13 Crack Full is simple. Here are the steps to use it:


        Connect to a server of your choice


        The first step is to connect to a server of your choice from the list of servers that Hide ALL IP provides. You can choose a server based on its country, region, city, or ping.


        To connect to a server, simply click on it and wait for a few seconds until it shows a green check mark. This means that you are connected successfully.


        Change your IP address and location


        The next step is to change your IP address and location according to the server that you have connected to. You can see your fake IP address and location on the main interface of Hide ALL IP.


        You can also check your real IP address and location by clicking on "Show My Real Location" button. This will open a web page that will display your actual information.

        -
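
        If you want to verify the change from outside the app, you can query a public IP echo service before and after connecting; the two results should differ while the fake IP is active. Below is a minimal illustrative sketch in Python (api.ipify.org is just one example of such a service and is not affiliated with Hide ALL IP):

```python
# Illustrative sketch only: queries a public IP echo service.
# api.ipify.org is an example service; any equivalent one works.
import urllib.request

def current_public_ip() -> str:
    with urllib.request.urlopen("https://api.ipify.org") as resp:
        return resp.read().decode().strip()

print(current_public_ip())  # run once before and once after connecting
```

        -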

        You can also change your fake IP address and location manually by clicking the "Change Your Location" button. This opens a window where you can enter any IP address or location that you want.

        -

        Encrypt all your data and traffic

        -

        The final step is to encrypt all your data and traffic with Hide ALL IP's encryption technology. This will ensure that no one can see what you are doing online, or steal your personal information, passwords, credit card numbers, etc.

        -

        To encrypt all your data and traffic, simply click the "Encrypt All Data" button on the main interface of Hide ALL IP. This turns on encryption mode and shows a lock icon next to the button.

        -

        You can also customize the encryption settings by clicking the "Encryption Settings" button. This opens a window where you can choose the encryption algorithm, key length, port number, and so on.

        -
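
        For readers curious what the AES option means in practice, the sketch below shows AES-GCM encrypting and decrypting a small payload in Python. This is purely illustrative and is not Hide ALL IP's actual closed-source implementation:

```python
# Illustrative sketch only: AES-GCM encryption of a payload using the
# third-party "cryptography" package; not Hide ALL IP's internals.
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)   # 256-bit AES key
nonce = os.urandom(12)                      # must be unique per message
aesgcm = AESGCM(key)

ciphertext = aesgcm.encrypt(nonce, b"example traffic payload", None)
plaintext = aesgcm.decrypt(nonce, ciphertext, None)
assert plaintext == b"example traffic payload"
```

        If the settings window offers both AES and DES, prefer AES with a longer key; DES is considered obsolete.

        -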

        What are the benefits of using Hide ALL IP 2020.01.13 Crack Full?

        -

        Using Hide ALL IP 2020.01.13 Crack Full has many benefits that make it the best IP hide software in the market. Some of the benefits are:

        -

        Access internet TV and geo-restricted websites

        -

        One of the benefits of using Hide ALL IP 2020.01.13 Crack Full is that you can access internet TV, streaming services, websites, and other content that is blocked or restricted in your region due to geo-restrictions or censorship.

        -

        For example, you can watch Netflix, Hulu, BBC iPlayer, HBO, Disney+, etc. from anywhere in the world by connecting to a server in the country where these services are available.

        -

        You can also access websites and applications that are banned or blocked in your country, such as Facebook, Twitter, YouTube, WhatsApp, etc. by connecting to a server in a different country where these websites and applications are not restricted.

        -

        Reduce your game ping and lag

        -

        Another benefit of using Hide ALL IP 2020.01.13 Crack Full is that you can reduce your game ping and lag by connecting to a server closer to the game server.

        -

        This will improve your gaming experience and performance, especially for online multiplayer games that require low latency and high speed.

        -

        For example, you can play PUBG, Fortnite, League of Legends, etc. with less ping and lag by connecting to a server in the same region as the game server.

        -
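
        If you want to measure the difference yourself, a rough approach is to time a TCP connection to a reachable host before and after connecting (ICMP ping needs raw sockets, so plain TCP timing is used in this sketch; "example.com" is only a placeholder for a real game server):

```python
# Illustrative sketch only: rough round-trip estimate via TCP connect time.
# "example.com" is a placeholder host; substitute a server near your game region.
import socket
import time

def tcp_rtt_ms(host: str, port: int = 443) -> float:
    start = time.perf_counter()
    with socket.create_connection((host, port), timeout=5):
        return (time.perf_counter() - start) * 1000.0

print(f"{tcp_rtt_ms('example.com'):.1f} ms")  # compare before/after connecting
```

        -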

        Bypass firewalls and censorship

        -

        A third benefit of using Hide ALL IP 2020.01.13 Crack Full is that you can bypass firewalls, proxies, VPNs, and other network restrictions that might prevent you from accessing certain websites or applications.

        -

        This is because Hide ALL IP uses HTTP tunnel mode to route all your data and traffic through any firewall or proxy. This way, you can access any website or application without limitations or interference.

        -

        For example, you can access Gmail, Google Drive, Dropbox, etc. from your school or office network by connecting to a server outside your network.

        -

        Conclusion

        -

        In conclusion, Hide ALL IP 2020.01.13 Crack Full is the best IP hide software that allows you to hide all your applications and games IP from snoopers and hackers. You can surf anonymously, encrypt all your data, and enjoy the freedom of the internet with just one click.

        -

        To install Hide ALL IP 2020.01.13 Crack Full, download it from a reliable source, temporarily disable Windows Defender and your antivirus software, extract the zip file and run the setup file, copy the crack file into the installation directory, and then run Hide ALL IP.

        -

        You can use Hide ALL IP 2020.01.13 Crack Full to connect to a server of your choice, change your IP address and location, encrypt all your data and traffic, access internet TV and geo-restricted websites, reduce your game ping and lag, bypass firewalls and censorship, and more.

        -

        If you want to surf the internet anonymously and protect your online identity, then you should try Hide ALL IP 2020.01.13 Crack Full today.

        -

        FAQs

        -

        Here are some frequently asked questions about Hide ALL IP 2020.01.13 Crack Full:

        -
        • Is Hide ALL IP safe to use?
        • Yes, Hide ALL IP is safe to use as it does not contain any viruses or malware. It also encrypts all your data and traffic with AES/DES encryption, so no one can see what you are doing online or steal your personal information.
        • Does Hide ALL IP work with Windows 10?
        • Yes, Hide ALL IP works with Windows 10 as well as other versions of Windows such as Windows 8.1/8/7/Vista/XP/2000 (32-bit/64-bit).
        • How much does Hide ALL IP cost?
        • The official price of Hide ALL IP is $29 per year for one license. However, you can use Hide ALL IP 2020.01.13 Crack Full for free by downloading it from a reliable source and activating it with the crack file.
        • Can I use Hide ALL IP on multiple devices?
        • Yes, you can use Hide ALL IP on multiple devices by sharing your fake IP with them through the Mobile Hotspot feature. This way, you can enjoy the same level of anonymity and security on all your devices.
        • Can I use Hide ALL IP with other VPN or proxy services?
        • No, you should not combine Hide ALL IP with other VPN or proxy services, as they might conflict with its settings and functions. Use Hide ALL IP as your sole IP hide software.

        -
        -
        \ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/audiocraft/modules/transformer.py b/spaces/studiobrn/SplitTrack/audiocraft/modules/transformer.py deleted file mode 100644 index be6a5e420fc53eebe9947aa5dde7bfebd3cb4dad..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/audiocraft/modules/transformer.py +++ /dev/null @@ -1,704 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Transformer model, with streaming support, xformer attention support -and easy causal attention with a potentially finite receptive field. - -See `StreamingTransformer` for more information. - -Unlike regular PyTorch Transformer, we make the hard choice that batches are first. -""" - -import typing as tp - -from einops import rearrange -import torch -import torch.nn as nn -from torch.nn import functional as F -from torch.utils.checkpoint import checkpoint as torch_checkpoint -from xformers import ops - -from .rope import RotaryEmbedding -from .streaming import StreamingModule - - -def _is_profiled() -> bool: - # Return true if we are currently running with a xformers profiler activated. - try: - from xformers.profiler import profiler - except ImportError: - return False - return profiler._Profiler._CURRENT_PROFILER is not None - - -def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module: - """Create normalization module for transformer encoder layer. - - Args: - norm_type (str): Normalization method. - dim (int): Dimension of the normalized layer. - **kwargs (dict): Additional parameters for normalization layer. - Returns: - nn.Module: Normalization module. - """ - if norm_type == 'layer_norm': - return nn.LayerNorm(dim, eps=1e-5, **kwargs) - else: - raise ValueError(f"Unknown norm type: {norm_type}") - - -def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000, - dtype: torch.dtype = torch.float32) -> torch.Tensor: - """Create sinusoidal positional embedding, with shape `[B, T, C]`. - - Args: - positions (torch.Tensor): LongTensor of positions. - dim (int): Dimension of the embedding. - max_period (float): Maximum period of the cosine/sine functions. - dtype (torch.dtype or str): dtype to use to generate the embedding. - Returns: - torch.Tensor: Sinusoidal positional embedding. - """ - # We aim for BTC format - assert dim % 2 == 0 - half_dim = dim // 2 - positions = positions.to(dtype) - adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1) - max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point - phase = positions / (max_period_tensor ** (adim / (half_dim - 1))) - return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1) - - -def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: - """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers""" - bs, slen, n_kv_heads, head_dim = x.shape - if n_rep == 1: - return x - return ( - x[:, :, :, None, :] - .expand(bs, slen, n_kv_heads, n_rep, head_dim) - .reshape(bs, slen, n_kv_heads * n_rep, head_dim) - ) - - -class LayerScale(nn.Module): - """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). - This rescales diagonaly the residual outputs close to 0, with a learnt scale. - - Args: - channels (int): Number of channels. - init (float): Initial scale. 
- channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype or None): dtype to use to initialize the module. - """ - def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True, - device=None, dtype=None): - super().__init__() - self.channel_last = channel_last - self.scale = nn.Parameter( - torch.full((channels,), init, - requires_grad=True, device=device, dtype=dtype)) - - def forward(self, x: torch.Tensor): - if self.channel_last: - return self.scale * x - else: - return self.scale[:, None] * x - - -class StreamingMultiheadAttention(StreamingModule): - """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation. - - Args: - embed_dim (int): Dimension to project to. - num_heads (int): Number of heads. - dropout (float): Dropout level. - bias (bool): Use bias in projections. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - rope (`RotaryEmbedding` or None): Rope embedding to use. - cross_attention: Should be true when used as a cross attention. - All keys and values must be available at once, streaming is only for the queries. - Cannot be used with `causal` or `rope` (as it wouldn't make sens to - intepret the time steps in the keys relative to those in the queries). - safe_streaming (bool): Bug fix, will go away with xformers update. - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product. - kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Sevice on which to initialize. - dtype (torch.dtype or None): dtype to use. - """ - def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False, - safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1, - device=None, dtype=None): - super().__init__() - factory_kwargs = {'device': device, 'dtype': dtype} - if past_context is not None: - assert causal - - self.embed_dim = embed_dim - self.causal = causal - self.past_context = past_context - self.memory_efficient = memory_efficient - self.attention_as_float32 = attention_as_float32 - self.rope = rope - self.cross_attention = cross_attention - self.safe_streaming = safe_streaming - self.num_heads = num_heads - self.dropout = dropout - self.kv_repeat = kv_repeat - if cross_attention: - assert not causal, "Causal cannot work with cross attention." - assert rope is None, "Rope cannot work with cross attention." 
- - if memory_efficient: - _verify_xformers_memory_efficient_compat() - - self.custom = _is_custom(custom, memory_efficient) - if self.custom: - out_dim = embed_dim - assert num_heads % kv_repeat == 0 - assert not cross_attention or kv_repeat == 1 - num_kv = num_heads // kv_repeat - kv_dim = (embed_dim // num_heads) * num_kv - out_dim += 2 * kv_dim - in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs) - # We try to follow the default PyTorch MHA convention, to easily compare results. - self.in_proj_weight = in_proj.weight - self.in_proj_bias = in_proj.bias - if bias: - self.in_proj_bias.data.zero_() # Following Pytorch convention - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) - if bias: - self.out_proj.bias.data.zero_() - else: - assert not qk_layer_norm - assert kv_repeat == 1 - self.mha = nn.MultiheadAttention( - embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True, - **factory_kwargs) - self.qk_layer_norm = qk_layer_norm - if qk_layer_norm: - assert self.custom - assert kv_repeat == 1 - ln_dim = embed_dim - self.q_layer_norm = nn.LayerNorm(ln_dim) - self.k_layer_norm = nn.LayerNorm(ln_dim) - - def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): - if not self.custom: - # Support compat with regular MHA - keys = [n for n, _ in self.mha.named_parameters()] - for key in keys: - if prefix + key in state_dict: - state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key) - super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) - - def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype): - # Return a causal mask, accounting for potentially stored past keys/values - # We actually return a bias for the attention score, as this has the same - # convention both in the builtin MHA in Pytorch, and Xformers functions. - if self.memory_efficient: - from xformers.ops import LowerTriangularMask - if current_steps == 1: - # If we only have one step, then we do not need a mask. - return None - elif 'past_keys' in self._streaming_state: - raise RuntimeError('Not supported at the moment') - else: - # Then we can safely use a lower triangular mask - return LowerTriangularMask() - if self._streaming_state: - past_keys = self._streaming_state['past_keys'] - past_steps = past_keys.shape[1] - else: - past_steps = 0 - - queries_pos = torch.arange( - past_steps, current_steps + past_steps, device=device).view(-1, 1) - keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1) - delta = queries_pos - keys_pos - valid = delta >= 0 - if self.past_context is not None: - valid &= (delta <= self.past_context) - return torch.where( - valid, - torch.zeros([], device=device, dtype=dtype), - torch.full([], float('-inf'), device=device, dtype=dtype)) - - def _complete_kv(self, k, v): - if self.cross_attention: - # With cross attention we assume all keys and values - # are already available, and streaming is with respect - # to the queries only. - return k, v - # Complete the key/value pair using the streaming state. 
- if self._streaming_state: - pk = self._streaming_state['past_keys'] - nk = torch.cat([pk, k], dim=1) - if v is k: - nv = nk - else: - pv = self._streaming_state['past_values'] - nv = torch.cat([pv, v], dim=1) - else: - nk = k - nv = v - - assert nk.shape[1] == nv.shape[1] - offset = 0 - if self.past_context is not None: - offset = max(0, nk.shape[1] - self.past_context) - if self._is_streaming: - self._streaming_state['past_keys'] = nk[:, offset:] - if v is not k: - self._streaming_state['past_values'] = nv[:, offset:] - if 'offset' in self._streaming_state: - self._streaming_state['offset'] += offset - else: - self._streaming_state['offset'] = torch.tensor(0) - return nk, nv - - def _apply_rope(self, query: torch.Tensor, key: torch.Tensor): - # Apply rope embeddings to query and key tensors. - assert self.rope is not None - if 'past_keys' in self._streaming_state: - past_keys_offset = self._streaming_state['past_keys'].shape[1] - else: - past_keys_offset = 0 - if 'offset' in self._streaming_state: - past_context_offset = int(self._streaming_state['offset'].item()) - else: - past_context_offset = 0 - streaming_offset = past_context_offset + past_keys_offset - return self.rope.rotate_qk(query, key, start=streaming_offset) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, - key_padding_mask=None, need_weights=False, attn_mask=None, - average_attn_weights=True, is_causal=False): - assert attn_mask is None - assert not is_causal, ("new param added in torch 2.0.1 not supported, " - "use the causal args in the constructor.") - - dtype = query.dtype - if self._is_streaming: - assert self.causal or self.cross_attention, \ - "Streaming only available for causal or cross attention" - - if self.causal: - # At the moment we specialize only for the self-attention case. - assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value" - assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value" - attn_mask = self._get_mask(query.shape[1], query.device, query.dtype) - - if self.custom: - # custom implementation - assert need_weights is False - assert key_padding_mask is None - if self.cross_attention: - # Different queries, keys, values, we have to spit manually the weights - # before applying the linear. - dim = self.in_proj_weight.shape[0] // 3 - if self.in_proj_bias is None: - bias_q, bias_k, bias_v = None, None, None - else: - bias_q = self.in_proj_bias[:dim] - bias_k = self.in_proj_bias[dim: 2 * dim] - bias_v = self.in_proj_bias[2 * dim:] - q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q) - # todo: when streaming, we could actually save k, v and check the shape actually match. - k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k) - v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v) - if self.qk_layer_norm is True: - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - # q, k, v = [rearrange(x, "b t (h d) -> (b h) t d", h=self.num_heads) for x in [q, k, v]] - q, k, v = [rearrange(x, "b t (h d) -> b t h d", h=self.num_heads) for x in [q, k, v]] - else: - if not _is_profiled(): - # profiling breaks that propertysomehow. 
- assert query is key, "specialized implementation" - assert value is key, "specialized implementation" - projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias) - if self.kv_repeat == 1: - packed = rearrange(projected, "b t (p h d) -> b t p h d", p=3, h=self.num_heads) - q, k, v = ops.unbind(packed, dim=2) - else: - embed_dim = self.embed_dim - per_head_dim = (embed_dim // self.num_heads) - kv_heads = self.num_heads // self.kv_repeat - q = projected[:, :, :embed_dim] - start = embed_dim - end = start + per_head_dim * kv_heads - k = projected[:, :, start: end] - v = projected[:, :, end:] - q = rearrange(q, "b t (h d) -> b t h d", h=self.num_heads) - k = rearrange(k, "b t (h d) -> b t h d", h=kv_heads) - v = rearrange(v, "b t (h d) -> b t h d", h=kv_heads) - - if self.qk_layer_norm is True: - assert self.kv_repeat == 1 - q, k = [rearrange(x, "b t h d -> b t (h d)") for x in [q, k]] - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k = [rearrange(x, "b t (h d) -> b t h d", h=self.num_heads) for x in [q, k]] - if self.rope: - q, k = self._apply_rope(q, k) - k, v = self._complete_kv(k, v) - if self.kv_repeat > 1: - k = expand_repeated_kv(k, self.kv_repeat) - v = expand_repeated_kv(v, self.kv_repeat) - if self.attention_as_float32: - q, k, v = [x.float() for x in [q, k, v]] - if self.memory_efficient: - p = self.dropout if self.training else 0 - x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p) - else: - # We include the dot product as float32, for consistency - # with the other implementations that include that step - # as part of the attention. Note that when using `autocast`, - # the einsums would be done as bfloat16, but the softmax - # would be done as bfloat16, so `attention_as_float32` will - # extend a bit the range of operations done in float32, - # although this should make no difference. - q = q / q.shape[-1] ** 0.5 - if self._is_streaming and self.safe_streaming and q.device.type == 'cuda': - with torch.autocast(device_type=q.device.type, dtype=torch.float32): - pre_w = torch.einsum("bqhc,bkhc->bhqk", q, k) - else: - pre_w = torch.einsum("bqhc,bkhc->bhqk", q, k) - if attn_mask is not None: - pre_w = pre_w + attn_mask - w = torch.softmax(pre_w, dim=-1) - w = F.dropout(w, self.dropout, training=self.training).to(v) - x = torch.einsum("bhqk,bkhc->bqhc", w, v) - x = x.to(dtype) - x = rearrange(x, "b t h d -> b t (h d)", h=self.num_heads) - x = self.out_proj(x) - else: - key, value = self._complete_kv(key, value) - if self.attention_as_float32: - query, key, value = [x.float() for x in [query, key, value]] - x, _ = self.mha( - query, key, value, key_padding_mask, - need_weights, attn_mask, average_attn_weights) - x = x.to(dtype) - - return x, None - - -class StreamingTransformerLayer(nn.TransformerEncoderLayer): - """TransformerLayer with Streaming / Causal support. - This also integrates cross_attention, when passing `cross_attention=True`, - rather than having two separate classes like in PyTorch. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. 
- attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention. - qk_layer_norm_cross (bool): Same for the cross attention. - cross_attention (bool): If True, expect to get secondary input for cross-attention. - Cross attention will use the default MHA, as it typically won't require - special treatment. - layer_scale (float or None): If not None, LayerScale will be used with - the given value as initial scale. - rope (`RotaryEmbedding` or None): Rope embedding to use. - attention_dropout (float or None): If not None, separate the value of the dimension dropout - in FFN and of the attention dropout. - kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. - """ - def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1, - bias_ff: bool = True, bias_attn: bool = True, causal: bool = False, - past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None, - kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs): - super().__init__(d_model, num_heads, dim_feedforward, dropout, - device=device, dtype=dtype, batch_first=True, **kwargs) - factory_kwargs = {'device': device, 'dtype': dtype} - # Redefine self_attn to our streaming multi-head attention - attn_kwargs: tp.Dict[str, tp.Any] = { - 'embed_dim': d_model, - 'num_heads': num_heads, - 'dropout': dropout if attention_dropout is None else attention_dropout, - 'bias': bias_attn, - 'custom': custom, - 'memory_efficient': memory_efficient, - 'attention_as_float32': attention_as_float32, - } - self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention( - causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm, - kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore - # Redefine feedforward layers to expose bias parameter - self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs) - self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs) - - self.layer_scale_1: nn.Module - self.layer_scale_2: nn.Module - if layer_scale is None: - self.layer_scale_1 = nn.Identity() - self.layer_scale_2 = nn.Identity() - else: - self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs) - self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs) - - self.cross_attention: tp.Optional[nn.Module] = None - if cross_attention: - self.cross_attention = StreamingMultiheadAttention( - cross_attention=True, qk_layer_norm=qk_layer_norm_cross, - **attn_kwargs, **factory_kwargs) - # Norm and dropout - self.dropout_cross = nn.Dropout(dropout) - # eps value matching that used in PyTorch reference implementation. 
- self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs) - self.layer_scale_cross: nn.Module - if layer_scale is None: - self.layer_scale_cross = nn.Identity() - else: - self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs) - self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - - def _cross_attention_block(self, src: torch.Tensor, - cross_attention_src: torch.Tensor) -> torch.Tensor: - assert self.cross_attention is not None - # queries are from src, keys and values from cross_attention_src. - x = self.cross_attention( - src, cross_attention_src, cross_attention_src, need_weights=False)[0] - return self.dropout_cross(x) # type: ignore - - def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore - src_key_padding_mask: tp.Optional[torch.Tensor] = None, - cross_attention_src: tp.Optional[torch.Tensor] = None): - if self.cross_attention is None: - assert cross_attention_src is None - else: - assert cross_attention_src is not None - x = src - if self.norm_first: - x = x + self.layer_scale_1( - self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)) - if cross_attention_src is not None: - x = x + self.layer_scale_cross( - self._cross_attention_block( - self.norm_cross(x), cross_attention_src)) - x = x + self.layer_scale_2(self._ff_block(self.norm2(x))) - else: - x = self.norm1(x + self.layer_scale_1( - self._sa_block(x, src_mask, src_key_padding_mask))) - if cross_attention_src is not None: - x = self.norm_cross( - x + self.layer_scale_cross( - self._cross_attention_block(src, cross_attention_src))) - x = self.norm2(x + self.layer_scale_2(self._ff_block(x))) - return x - - -class StreamingTransformer(StreamingModule): - """Transformer with Streaming / Causal support. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - cross_attention (bool): If True, expect to get secondary input for cross-attention. - layer_scale (float or None): If not None, LayerScale will be used - with the given value as initial scale. - positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope). - max_period (float): Maximum period of the time embedding. - positional_scale (float): Scale of positional embedding, set to 0 to deactivate. - xpos (bool): Apply xpos exponential decay to positional embedding (rope only). - lr (float or None): learning rate override through the `make_optim_group` API. - weight_decay (float or None): Weight_decay override through the `make_optim_group` API. - layer_class: (subclass of `StreamingTransformerLayer): class to use - to initialize the layers, allowing further customization outside of Audiocraft. - checkpointing (str): Checkpointing strategy to reduce memory usage. - No checkpointing if set to 'none'. 
Per layer checkpointing using PyTorch - if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice, - minimal memory usage, but maximal runtime). Finally, `xformers_default` provide - a policy for opting-out some operations of the checkpointing like - linear layers and attention, providing a middle ground between speed and memory. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. - """ - def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048, - dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, - custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1., - xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None, - layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer, - checkpointing: str = 'none', device=None, dtype=None, **kwargs): - super().__init__() - assert d_model % num_heads == 0 - - self.positional_embedding = positional_embedding - self.max_period = max_period - self.positional_scale = positional_scale - self.weight_decay = weight_decay - self.lr = lr - - assert positional_embedding in ['sin', 'rope', 'sin_rope'] - self.rope: tp.Optional[RotaryEmbedding] = None - if self.positional_embedding in ['rope', 'sin_rope']: - assert _is_custom(custom, memory_efficient) - self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period, - xpos=xpos, scale=positional_scale, device=device) - - self.checkpointing = checkpointing - - assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm'] - if self.checkpointing.startswith('xformers'): - _verify_xformers_internal_compat() - - self.layers = nn.ModuleList() - for idx in range(num_layers): - self.layers.append( - layer_class( - d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward, - dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn, - causal=causal, past_context=past_context, custom=custom, - memory_efficient=memory_efficient, attention_as_float32=attention_as_float32, - cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope, - device=device, dtype=dtype, **kwargs)) - - if self.checkpointing != 'none': - for layer in self.layers: - # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the - # backward hook inside of FSDP... - layer._magma_checkpointed = True # type: ignore - assert layer.layer_drop == 0., "Need further checking" # type: ignore - - def _apply_layer(self, layer, *args, **kwargs): - method = self.checkpointing - if method == 'none': - return layer(*args, **kwargs) - elif method == 'torch': - return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs) - elif method.startswith('xformers'): - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy - if method == 'xformers_default': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. 
- allow_list = [ - "xformers.efficient_attention_forward_cutlass.default", - "xformers_flash.flash_fwd.default", - "aten.addmm.default", - "aten.mm.default", - ] - elif method == 'xformers_mm': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. - allow_list = [ - "aten.addmm.default", - "aten.mm.default", - ] - else: - raise ValueError(f"xformers checkpointing xformers policy {method} is not known.") - policy_fn = _get_default_policy(allow_list) - return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs) - else: - raise ValueError(f"Checkpointing method {method} is unknown.") - - def forward(self, x: torch.Tensor, *args, **kwargs): - B, T, C = x.shape - - if 'offsets' in self._streaming_state: - offsets = self._streaming_state['offsets'] - else: - offsets = torch.zeros(B, dtype=torch.long, device=x.device) - - if self.positional_embedding in ['sin', 'sin_rope']: - positions = torch.arange(T, device=x.device).view(1, -1, 1) - positions = positions + offsets.view(-1, 1, 1) - pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype) - x = x + self.positional_scale * pos_emb - - for layer in self.layers: - x = self._apply_layer(layer, x, *args, **kwargs) - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return x - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - if self.weight_decay is not None: - group["weight_decay"] = self.weight_decay - return group - - -# special attention attention related function - -def _verify_xformers_memory_efficient_compat(): - try: - from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa - except ImportError: - raise ImportError( - "xformers is not installed. Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _verify_xformers_internal_compat(): - try: - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa - except ImportError: - raise ImportError( - "Francisco's fairinternal xformers is not installed. Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _is_custom(custom: bool, memory_efficient: bool): - return custom or memory_efficient diff --git a/spaces/studiobrn/SplitTrack/setup.py b/spaces/studiobrn/SplitTrack/setup.py deleted file mode 100644 index 78a172b7c90003b689bde40b49cc8fe1fb8107d4..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/setup.py +++ /dev/null @@ -1,65 +0,0 @@ -""" - Copyright (c) Meta Platforms, Inc. and affiliates. - All rights reserved. - - This source code is licensed under the license found in the - LICENSE file in the root directory of this source tree. 
- -""" - -from pathlib import Path - -from setuptools import setup, find_packages - - -NAME = 'audiocraft' -DESCRIPTION = 'Audio research library for PyTorch' - -URL = 'https://github.com/fairinternal/audiocraft' -AUTHOR = 'FAIR Speech & Audio' -EMAIL = 'defossez@meta.com' -REQUIRES_PYTHON = '>=3.8.0' - -for line in open('audiocraft/__init__.py'): - line = line.strip() - if '__version__' in line: - context = {} - exec(line, context) - VERSION = context['__version__'] - -HERE = Path(__file__).parent - -try: - with open(HERE / "README.md", encoding='utf-8') as f: - long_description = '\n' + f.read() -except FileNotFoundError: - long_description = DESCRIPTION - -REQUIRED = [i.strip() for i in open(HERE / 'requirements.txt') if not i.startswith('#')] - -setup( - name=NAME, - version=VERSION, - description=DESCRIPTION, - author_email=EMAIL, - long_description=long_description, - long_description_content_type='text/markdown', - author=AUTHOR, - url=URL, - python_requires=REQUIRES_PYTHON, - install_requires=REQUIRED, - extras_require={ - 'dev': ['coverage', 'flake8', 'mypy', 'pdoc3', 'pytest'], - }, - packages=find_packages(), - package_data={'audiocraft': ['py.typed']}, - include_package_data=True, - license='MIT License', - classifiers=[ - # Trove classifiers - # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers - 'License :: OSI Approved :: MIT License', - 'Topic :: Multimedia :: Sound/Audio', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - ], -) diff --git a/spaces/sudo-ai/zero123plus-demo-space/README.md b/spaces/sudo-ai/zero123plus-demo-space/README.md deleted file mode 100644 index cf3b7be9fc606c665b9ca7457e99233ea2c0277e..0000000000000000000000000000000000000000 --- a/spaces/sudo-ai/zero123plus-demo-space/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Zero123++ Demo Space -emoji: 🌒 -colorFrom: red -colorTo: purple -sdk: docker -app_port: 7860 -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sunshineatnoon/TextureScraping/swapae/options/__init__.py b/spaces/sunshineatnoon/TextureScraping/swapae/options/__init__.py deleted file mode 100644 index b0a0bfe84d3719d3c52cab7323e51e953e0600b2..0000000000000000000000000000000000000000 --- a/spaces/sunshineatnoon/TextureScraping/swapae/options/__init__.py +++ /dev/null @@ -1,212 +0,0 @@ -import argparse -import shlex -import os -import pickle - -import swapae.util as util -import swapae.models as models -import swapae.models.networks as networks -import swapae.data as data -import swapae.evaluation as evaluation -import swapae.optimizers as optimizers -from swapae.util import IterationCounter -from swapae.util import Visualizer - - -class BaseOptions(): - def initialize(self, parser): - # experiment specifics - parser.add_argument('--name', type=str, default="ffhq512_pretrained", help='name of the experiment. It decides where to store samples and models') - parser.add_argument('--easy_label', type=str, default="") - - parser.add_argument('--num_gpus', type=int, default=1, help='#GPUs to use. 
0 means CPU mode') - parser.add_argument('--checkpoints_dir', type=str, default='/home/xtli/Documents/GITHUB/swapping-autoencoder-pytorch/checkpoints/', help='models are saved here') - parser.add_argument('--model', type=str, default='swapping_autoencoder', help='which model to use') - parser.add_argument('--optimizer', type=str, default='swapping_autoencoder', help='which model to use') - parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') - parser.add_argument('--resume_iter', type=str, default="latest", - help="# iterations (in thousands) to resume") - parser.add_argument('--num_classes', type=int, default=0) - - # input/output sizes - parser.add_argument('--batch_size', type=int, default=1, help='input batch size') - parser.add_argument('--preprocess', type=str, default='resize', help='scaling and cropping of images at load time.') - parser.add_argument('--load_size', type=int, default=512, help='Scale images to this size. The final image will be cropped to --crop_size.') - parser.add_argument('--crop_size', type=int, default=512, help='Crop to the width of crop_size (after initially scaling the images to load_size.)') - parser.add_argument('--preprocess_crop_padding', type=int, default=None, help='padding parameter of transforms.RandomCrop(). It is not used if --preprocess does not contain crop option.') - parser.add_argument('--no_flip', action='store_true') - parser.add_argument('--shuffle_dataset', type=str, default=None, choices=('true', 'false')) - - # for setting inputs - parser.add_argument('--dataroot', type=str, default="/home/xtli/Dropbox/swapping-autoencoder-pytorch/testphotos/ffhq512/fig9/") - parser.add_argument('--dataset_mode', type=str, default='imagefolder') - parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data') - - # networks - parser.add_argument("--netG", default="StyleGAN2Resnet") - parser.add_argument("--netD", default="StyleGAN2") - parser.add_argument("--netE", default="StyleGAN2Resnet") - parser.add_argument("--netPatchD", default="StyleGAN2") - parser.add_argument("--use_antialias", type=util.str2bool, default=True) - - parser.add_argument("-f", "--config_file", type=str, default='models/swap/json/sem_cons.json', help='json files including all arguments') - parser.add_argument("--local_rank", type=int) - - return parser - - def gather_options(self, command=None): - parser = AugmentedArgumentParser() - parser.custom_command = command - - # get basic options - parser = self.initialize(parser) - - # get the basic options - opt, unknown = parser.parse_known_args() - - # modify model-related parser options - model_name = opt.model - model_option_setter = models.get_option_setter(model_name) - parser = model_option_setter(parser, self.isTrain) - - # modify network-related parser options - parser = networks.modify_commandline_options(parser, self.isTrain) - - # modify optimizer-related parser options - optimizer_name = opt.optimizer - optimizer_option_setter = optimizers.get_option_setter(optimizer_name) - parser = optimizer_option_setter(parser, self.isTrain) - - # modify dataset-related parser options - dataset_mode = opt.dataset_mode - dataset_option_setter = data.get_option_setter(dataset_mode) - parser = dataset_option_setter(parser, self.isTrain) - - # modify parser options related to iteration_counting - parser = Visualizer.modify_commandline_options(parser, self.isTrain) - - # modify parser options related to iteration_counting - parser = 
IterationCounter.modify_commandline_options(parser, self.isTrain) - - # modify evaluation-related parser options - evaluation_option_setter = evaluation.get_option_setter() - parser = evaluation_option_setter(parser, self.isTrain) - - opt, unknown = parser.parse_known_args() - - opt = parser.parse_args() - self.parser = parser - return opt - - def print_options(self, opt): - """Print and save options - - It will print both current options and default values(if different). - It will save options into a text file / [checkpoints_dir] / opt.txt - """ - message = '' - message += '----------------- Options ---------------\n' - for k, v in sorted(vars(opt).items()): - comment = '' - default = self.parser.get_default(k) - if v != default: - comment = '\t[default: %s]' % str(default) - message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) - message += '----------------- End -------------------' - print(message) - - def option_file_path(self, opt, makedir=False): - expr_dir = os.path.join(opt.checkpoints_dir, opt.name) - if makedir: - util.mkdirs(expr_dir) - file_name = os.path.join(expr_dir, 'opt') - return file_name - - def save_options(self, opt): - file_name = self.option_file_path(opt, makedir=True) - with open(file_name + '.txt', 'wt') as opt_file: - for k, v in sorted(vars(opt).items()): - comment = '' - default = self.parser.get_default(k) - if v != default: - comment = '\t[default: %s]' % str(default) - opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)) - - with open(file_name + '.pkl', 'wb') as opt_file: - pickle.dump(opt, opt_file) - - def parse(self, save=False, command=None): - opt = self.gather_options(command) - opt.isTrain = self.isTrain # train or test - self.print_options(opt) - if opt.isTrain: - self.save_options(opt) - - opt.dataroot = os.path.expanduser(opt.dataroot) - - assert opt.num_gpus <= opt.batch_size, "Batch size must not be smaller than num_gpus" - return opt - - - -class TrainOptions(BaseOptions): - def __init__(self): - super().__init__() - self.isTrain = True - - def initialize(self, parser): - super().initialize(parser) - parser.add_argument('--continue_train', type=util.str2bool, default=False, help="resume training from last checkpoint") - parser.add_argument('--pretrained_name', type=str, default=None, - help="Load weights from the checkpoint of another experiment") - - return parser - - -class TestOptions(BaseOptions): - def __init__(self): - super().__init__() - self.isTrain = False - - def initialize(self, parser): - super().initialize(parser) - parser.add_argument("--result_dir", type=str, default="results") - return parser - - -class AugmentedArgumentParser(argparse.ArgumentParser): - def parse_args(self, args=None, namespace=None): - """ Enables passing bash commands as arguments to the class. - """ - print("parsing args...") - if args is None and hasattr(self, 'custom_command') and self.custom_command is not None: - print('using custom command') - print(self.custom_command) - args = shlex.split(self.custom_command)[2:] - return super().parse_args(args, namespace) - - def parse_known_args(self, args=None, namespace=None): - if args is None and hasattr(self, 'custom_command') and self.custom_command is not None: - args = shlex.split(self.custom_command)[2:] - return super().parse_known_args(args, namespace) - - def add_argument(self, *args, **kwargs): - """ Support for providing a new argument type called "str2bool" - - Example: - parser.add_argument("--my_option", type=util.str2bool, default=|bool|) - - 1. 
"python train.py" sets my_option to be |bool| - 2. "python train.py --my_option" sets my_option to be True - 3. "python train.py --my_option False" sets my_option to be False - 4. "python train.py --my_option True" sets my_options to be True - - https://stackoverflow.com/a/43357954 - """ - - if 'type' in kwargs and kwargs['type'] == util.str2bool: - if 'nargs' not in kwargs: - kwargs['nargs'] = "?" - if 'const' not in kwargs: - kwargs['const'] = True - super().add_argument(*args, **kwargs) diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/script_callbacks.py b/spaces/supertori/files/stable-diffusion-webui/modules/script_callbacks.py deleted file mode 100644 index fdfbd82754a2601e7bed7b758e3f58dd71e15716..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/script_callbacks.py +++ /dev/null @@ -1,365 +0,0 @@ -import sys -import traceback -from collections import namedtuple -import inspect -from typing import Optional, Dict, Any - -from fastapi import FastAPI -from gradio import Blocks - - -def report_exception(c, job): - print(f"Error executing callback {job} for {c.script}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - -class ImageSaveParams: - def __init__(self, image, p, filename, pnginfo): - self.image = image - """the PIL image itself""" - - self.p = p - """p object with processing parameters; either StableDiffusionProcessing or an object with same fields""" - - self.filename = filename - """name of file that the image would be saved to""" - - self.pnginfo = pnginfo - """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'""" - - -class CFGDenoiserParams: - def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond): - self.x = x - """Latent image representation in the process of being denoised""" - - self.image_cond = image_cond - """Conditioning image""" - - self.sigma = sigma - """Current sigma noise step value""" - - self.sampling_step = sampling_step - """Current Sampling step number""" - - self.total_sampling_steps = total_sampling_steps - """Total number of sampling steps planned""" - - self.text_cond = text_cond - """ Encoder hidden states of text conditioning from prompt""" - - self.text_uncond = text_uncond - """ Encoder hidden states of text conditioning from negative prompt""" - - -class CFGDenoisedParams: - def __init__(self, x, sampling_step, total_sampling_steps): - self.x = x - """Latent image representation in the process of being denoised""" - - self.sampling_step = sampling_step - """Current Sampling step number""" - - self.total_sampling_steps = total_sampling_steps - """Total number of sampling steps planned""" - - -class UiTrainTabParams: - def __init__(self, txt2img_preview_params): - self.txt2img_preview_params = txt2img_preview_params - - -class ImageGridLoopParams: - def __init__(self, imgs, cols, rows): - self.imgs = imgs - self.cols = cols - self.rows = rows - - -ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) -callback_map = dict( - callbacks_app_started=[], - callbacks_model_loaded=[], - callbacks_ui_tabs=[], - callbacks_ui_train_tabs=[], - callbacks_ui_settings=[], - callbacks_before_image_saved=[], - callbacks_image_saved=[], - callbacks_cfg_denoiser=[], - callbacks_cfg_denoised=[], - callbacks_before_component=[], - callbacks_after_component=[], - callbacks_image_grid=[], - callbacks_infotext_pasted=[], - callbacks_script_unloaded=[], - callbacks_before_ui=[], 
-) - - -def clear_callbacks(): - for callback_list in callback_map.values(): - callback_list.clear() - - -def app_started_callback(demo: Optional[Blocks], app: FastAPI): - for c in callback_map['callbacks_app_started']: - try: - c.callback(demo, app) - except Exception: - report_exception(c, 'app_started_callback') - - -def model_loaded_callback(sd_model): - for c in callback_map['callbacks_model_loaded']: - try: - c.callback(sd_model) - except Exception: - report_exception(c, 'model_loaded_callback') - - -def ui_tabs_callback(): - res = [] - - for c in callback_map['callbacks_ui_tabs']: - try: - res += c.callback() or [] - except Exception: - report_exception(c, 'ui_tabs_callback') - - return res - - -def ui_train_tabs_callback(params: UiTrainTabParams): - for c in callback_map['callbacks_ui_train_tabs']: - try: - c.callback(params) - except Exception: - report_exception(c, 'callbacks_ui_train_tabs') - - -def ui_settings_callback(): - for c in callback_map['callbacks_ui_settings']: - try: - c.callback() - except Exception: - report_exception(c, 'ui_settings_callback') - - -def before_image_saved_callback(params: ImageSaveParams): - for c in callback_map['callbacks_before_image_saved']: - try: - c.callback(params) - except Exception: - report_exception(c, 'before_image_saved_callback') - - -def image_saved_callback(params: ImageSaveParams): - for c in callback_map['callbacks_image_saved']: - try: - c.callback(params) - except Exception: - report_exception(c, 'image_saved_callback') - - -def cfg_denoiser_callback(params: CFGDenoiserParams): - for c in callback_map['callbacks_cfg_denoiser']: - try: - c.callback(params) - except Exception: - report_exception(c, 'cfg_denoiser_callback') - - -def cfg_denoised_callback(params: CFGDenoisedParams): - for c in callback_map['callbacks_cfg_denoised']: - try: - c.callback(params) - except Exception: - report_exception(c, 'cfg_denoised_callback') - - -def before_component_callback(component, **kwargs): - for c in callback_map['callbacks_before_component']: - try: - c.callback(component, **kwargs) - except Exception: - report_exception(c, 'before_component_callback') - - -def after_component_callback(component, **kwargs): - for c in callback_map['callbacks_after_component']: - try: - c.callback(component, **kwargs) - except Exception: - report_exception(c, 'after_component_callback') - - -def image_grid_callback(params: ImageGridLoopParams): - for c in callback_map['callbacks_image_grid']: - try: - c.callback(params) - except Exception: - report_exception(c, 'image_grid') - - -def infotext_pasted_callback(infotext: str, params: Dict[str, Any]): - for c in callback_map['callbacks_infotext_pasted']: - try: - c.callback(infotext, params) - except Exception: - report_exception(c, 'infotext_pasted') - - -def script_unloaded_callback(): - for c in reversed(callback_map['callbacks_script_unloaded']): - try: - c.callback() - except Exception: - report_exception(c, 'script_unloaded') - - -def before_ui_callback(): - for c in reversed(callback_map['callbacks_before_ui']): - try: - c.callback() - except Exception: - report_exception(c, 'before_ui') - - -def add_callback(callbacks, fun): - stack = [x for x in inspect.stack() if x.filename != __file__] - filename = stack[0].filename if len(stack) > 0 else 'unknown file' - - callbacks.append(ScriptCallback(filename, fun)) - - -def remove_current_script_callbacks(): - stack = [x for x in inspect.stack() if x.filename != __file__] - filename = stack[0].filename if len(stack) > 0 else 'unknown file' - if filename == 
'unknown file': - return - for callback_list in callback_map.values(): - for callback_to_remove in [cb for cb in callback_list if cb.script == filename]: - callback_list.remove(callback_to_remove) - - -def remove_callbacks_for_function(callback_func): - for callback_list in callback_map.values(): - for callback_to_remove in [cb for cb in callback_list if cb.callback == callback_func]: - callback_list.remove(callback_to_remove) - - -def on_app_started(callback): - """register a function to be called when the webui started, the gradio `Block` component and - fastapi `FastAPI` object are passed as the arguments""" - add_callback(callback_map['callbacks_app_started'], callback) - - -def on_model_loaded(callback): - """register a function to be called when the stable diffusion model is created; the model is - passed as an argument; this function is also called when the script is reloaded. """ - add_callback(callback_map['callbacks_model_loaded'], callback) - - -def on_ui_tabs(callback): - """register a function to be called when the UI is creating new tabs. - The function must either return a None, which means no new tabs to be added, or a list, where - each element is a tuple: - (gradio_component, title, elem_id) - - gradio_component is a gradio component to be used for contents of the tab (usually gr.Blocks) - title is tab text displayed to user in the UI - elem_id is HTML id for the tab - """ - add_callback(callback_map['callbacks_ui_tabs'], callback) - - -def on_ui_train_tabs(callback): - """register a function to be called when the UI is creating new tabs for the train tab. - Create your new tabs with gr.Tab. - """ - add_callback(callback_map['callbacks_ui_train_tabs'], callback) - - -def on_ui_settings(callback): - """register a function to be called before UI settings are populated; add your settings - by using shared.opts.add_option(shared.OptionInfo(...)) """ - add_callback(callback_map['callbacks_ui_settings'], callback) - - -def on_before_image_saved(callback): - """register a function to be called before an image is saved to a file. - The callback is called with one argument: - - params: ImageSaveParams - parameters the image is to be saved with. You can change fields in this object. - """ - add_callback(callback_map['callbacks_before_image_saved'], callback) - - -def on_image_saved(callback): - """register a function to be called after an image is saved to a file. - The callback is called with one argument: - - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing. - """ - add_callback(callback_map['callbacks_image_saved'], callback) - - -def on_cfg_denoiser(callback): - """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. - The callback is called with one argument: - - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details. - """ - add_callback(callback_map['callbacks_cfg_denoiser'], callback) - - -def on_cfg_denoised(callback): - """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs. - The callback is called with one argument: - - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. - """ - add_callback(callback_map['callbacks_cfg_denoised'], callback) - - -def on_before_component(callback): - """register a function to be called before a component is created. 
-    The callback is called with arguments:
-        - component - gradio component that is about to be created.
-        - **kwargs - args to gradio.components.IOComponent.__init__ function
-
-    Use elem_id/label fields of kwargs to figure out which component it is.
-    This can be useful to inject your own components somewhere in the middle of the vanilla UI.
-    """
-    add_callback(callback_map['callbacks_before_component'], callback)
-
-
-def on_after_component(callback):
-    """register a function to be called after a component is created. See on_before_component for more."""
-    add_callback(callback_map['callbacks_after_component'], callback)
-
-
-def on_image_grid(callback):
-    """register a function to be called before making an image grid.
-    The callback is called with one argument:
-        - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified.
-    """
-    add_callback(callback_map['callbacks_image_grid'], callback)
-
-
-def on_infotext_pasted(callback):
-    """register a function to be called before applying an infotext.
-    The callback is called with two arguments:
-        - infotext: str - raw infotext.
-        - result: Dict[str, Any] - parsed infotext parameters.
-    """
-    add_callback(callback_map['callbacks_infotext_pasted'], callback)
-
-
-def on_script_unloaded(callback):
-    """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that
-    the script did should be reverted here"""
-
-    add_callback(callback_map['callbacks_script_unloaded'], callback)
-
-
-def on_before_ui(callback):
-    """register a function to be called before the UI is created."""
-
-    add_callback(callback_map['callbacks_before_ui'], callback)
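The `on_*` helpers above are the whole extension-facing surface of this module: an extension script imports it, passes plain functions to the registration helpers, and the webui invokes them at the matching lifecycle points, with `report_exception` keeping one misbehaving extension from breaking the rest. A minimal sketch of how an extension might use this API; the option key, section name, and log line here are illustrative, not taken from the file above:

```python
# Hypothetical extension script for the webui whose callback module is shown above.
from modules import script_callbacks, shared


def add_settings():
    # Register one illustrative option in a made-up "My Extension" settings section.
    shared.opts.add_option(
        "my_ext_enabled",
        shared.OptionInfo(True, "Enable my extension", section=("my_ext", "My Extension")),
    )


def log_saved_image(params):
    # params is an ImageSaveParams; changing its fields here has no effect after saving.
    print(f"my_ext: image saved to {params.filename}")


script_callbacks.on_ui_settings(add_settings)
script_callbacks.on_image_saved(log_saved_image)
```

Because `add_callback` records the registering file via `inspect.stack()`, a reloaded extension can call `remove_current_script_callbacks()` first to drop its stale registrations before registering again.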

diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Monamour 2006 720p Bluray X264 Hd4u.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Monamour 2006 720p Bluray X264 Hd4u.md deleted file mode 100644 index 9e8c35379a2af84e63da463f8216e287cd19a309..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Monamour 2006 720p Bluray X264 Hd4u.md +++ /dev/null @@ -1,22 +0,0 @@ -
        Monamour 2006 720p bluray x264 hd4u


        Download Zip ✸✸✸ https://cinurl.com/2uEYQf



        -
        -monamour 2006 bluray 720p.mkv<br> monamour.2006.bluray.720p.mkv.srt [cyberchick.ru] - -If you don't want to convert it yourself, you could download this.mov file directly and save it to your computer, but it is a huge file so be patient. - -Re: monamour 2006 720p bluray x264 hd4u.. monamour.2006.bluray.720p.ac3.x264-chd english subtitle - -Or did you know you can download them to your local hard drive to save space by using any torrent program. - -If you don't want to convert it yourself, you could download this.mov file directly and save it to your computer, but it is a huge file so be patient. - -There is no free download link on this site. I thought all clips were free. It must be that one is not? - -There are 100's of free monamours 2006 bluray on the Internet. I linked to the cleane one (I couldn't find the free one), since it was the first thing I found. Click here to watch on torrents: - -[cyberchick.ru] - -Re: monamour 2006 720p bluray x264 hd4u.. monamour.2006.bluray 4fefd39f24
        -
        -
        -

        \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/priority.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/priority.py deleted file mode 100644 index 64cc4e3a05f8d5b89ab6eb32461e6e80f1d62e67..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/runner/priority.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from enum import Enum - - -class Priority(Enum): - """Hook priority levels. - - +--------------+------------+ - | Level | Value | - +==============+============+ - | HIGHEST | 0 | - +--------------+------------+ - | VERY_HIGH | 10 | - +--------------+------------+ - | HIGH | 30 | - +--------------+------------+ - | ABOVE_NORMAL | 40 | - +--------------+------------+ - | NORMAL | 50 | - +--------------+------------+ - | BELOW_NORMAL | 60 | - +--------------+------------+ - | LOW | 70 | - +--------------+------------+ - | VERY_LOW | 90 | - +--------------+------------+ - | LOWEST | 100 | - +--------------+------------+ - """ - - HIGHEST = 0 - VERY_HIGH = 10 - HIGH = 30 - ABOVE_NORMAL = 40 - NORMAL = 50 - BELOW_NORMAL = 60 - LOW = 70 - VERY_LOW = 90 - LOWEST = 100 - - -def get_priority(priority): - """Get priority value. - - Args: - priority (int or str or :obj:`Priority`): Priority. - - Returns: - int: The priority value. - """ - if isinstance(priority, int): - if priority < 0 or priority > 100: - raise ValueError('priority must be between 0 and 100') - return priority - elif isinstance(priority, Priority): - return priority.value - elif isinstance(priority, str): - return Priority[priority.upper()].value - else: - raise TypeError('priority must be an integer or Priority enum value') diff --git a/spaces/t110-ai-admin/InspectLens/video_llama/runners/runner_base.py b/spaces/t110-ai-admin/InspectLens/video_llama/runners/runner_base.py deleted file mode 100644 index c944123917dd0bf9947f4204f9044538a0f8bf22..0000000000000000000000000000000000000000 --- a/spaces/t110-ai-admin/InspectLens/video_llama/runners/runner_base.py +++ /dev/null @@ -1,658 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import datetime -import json -import logging -import os -import time -from pathlib import Path - -import torch -import torch.distributed as dist -import webdataset as wds -from video_llama.common.dist_utils import ( - download_cached_file, - get_rank, - get_world_size, - is_main_process, - main_process, -) -from video_llama.common.registry import registry -from video_llama.common.utils import is_url -from video_llama.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset -from video_llama.datasets.datasets.dataloader_utils import ( - IterLoader, - MultiIterLoader, - PrefetchLoader, -) -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.utils.data import DataLoader, DistributedSampler - - -@registry.register_runner("runner_base") -class RunnerBase: - """ - A runner class to train and evaluate a model given a task and datasets. - - The runner uses pytorch distributed data parallel by default. Future release - will support other distributed frameworks. 
- """ - - def __init__(self, cfg, task, model, datasets, job_id): - self.config = cfg - self.job_id = job_id - - self.task = task - self.datasets = datasets - - self._model = model - - self._wrapped_model = None - self._device = None - self._optimizer = None - self._scaler = None - self._dataloaders = None - self._lr_sched = None - - self.start_epoch = 0 - - # self.setup_seeds() - self.setup_output_dir() - - @property - def device(self): - if self._device is None: - self._device = torch.device(self.config.run_cfg.device) - - return self._device - - @property - def use_distributed(self): - return self.config.run_cfg.distributed - - @property - def model(self): - """ - A property to get the DDP-wrapped model on the device. - """ - # move model to device - if self._model.device != self.device: - self._model = self._model.to(self.device) - - # distributed training wrapper - if self.use_distributed: - if self._wrapped_model is None: - self._wrapped_model = DDP( - self._model, device_ids=[self.config.run_cfg.gpu] - ) - else: - self._wrapped_model = self._model - - return self._wrapped_model - - @property - def optimizer(self): - # TODO make optimizer class and configurations - if self._optimizer is None: - num_parameters = 0 - p_wd, p_non_wd = [], [] - for n, p in self.model.named_parameters(): - if not p.requires_grad: - continue # frozen weights - print(n) - if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: - p_non_wd.append(p) - else: - p_wd.append(p) - num_parameters += p.data.nelement() - logging.info("number of trainable parameters: %d" % num_parameters) - optim_params = [ - { - "params": p_wd, - "weight_decay": float(self.config.run_cfg.weight_decay), - }, - {"params": p_non_wd, "weight_decay": 0}, - ] - beta2 = self.config.run_cfg.get("beta2", 0.999) - self._optimizer = torch.optim.AdamW( - optim_params, - lr=float(self.config.run_cfg.init_lr), - weight_decay=float(self.config.run_cfg.weight_decay), - betas=(0.9, beta2), - ) - - return self._optimizer - - @property - def scaler(self): - amp = self.config.run_cfg.get("amp", False) - - if amp: - if self._scaler is None: - self._scaler = torch.cuda.amp.GradScaler() - - return self._scaler - - @property - def lr_scheduler(self): - """ - A property to get and create learning rate scheduler by split just in need. - """ - if self._lr_sched is None: - lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) - - # max_epoch = self.config.run_cfg.max_epoch - max_epoch = self.max_epoch - # min_lr = self.config.run_cfg.min_lr - min_lr = self.min_lr - # init_lr = self.config.run_cfg.init_lr - init_lr = self.init_lr - - # optional parameters - decay_rate = self.config.run_cfg.get("lr_decay_rate", None) - warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) - warmup_steps = self.config.run_cfg.get("warmup_steps", 0) - iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None) - - if iters_per_epoch is None: - try: - iters_per_epoch = len(self.dataloaders['train']) - except (AttributeError, TypeError): - iters_per_epoch = 10000 - - self._lr_sched = lr_sched_cls( - optimizer=self.optimizer, - max_epoch=max_epoch, - iters_per_epoch=iters_per_epoch, - min_lr=min_lr, - init_lr=init_lr, - decay_rate=decay_rate, - warmup_start_lr=warmup_start_lr, - warmup_steps=warmup_steps, - ) - - return self._lr_sched - - @property - def dataloaders(self) -> dict: - """ - A property to get and create dataloaders by split just in need. 
- - If no train_dataset_ratio is provided, concatenate map-style datasets and - chain wds.DataPipe datasets separately. Training set becomes a tuple - (ConcatDataset, ChainDataset), both are optional but at least one of them is - required. The resultant ConcatDataset and ChainDataset will be sampled evenly. - - If train_dataset_ratio is provided, create a MultiIterLoader to sample - each dataset by ratios during training. - - Currently do not support multiple datasets for validation and test. - - Returns: - dict: {split_name: (tuples of) dataloader} - """ - if self._dataloaders is None: - - # concatenate map-style datasets and chain wds.DataPipe datasets separately - # training set becomes a tuple (ConcatDataset, ChainDataset), both are - # optional but at least one of them is required. The resultant ConcatDataset - # and ChainDataset will be sampled evenly. - logging.info( - "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." - ) - - datasets = reorg_datasets_by_split(self.datasets) - self.datasets = datasets - # self.datasets = concat_datasets(datasets) - - # print dataset statistics after concatenation/chaining - for split_name in self.datasets: - if isinstance(self.datasets[split_name], tuple) or isinstance( - self.datasets[split_name], list - ): - # mixed wds.DataPipeline and torch.utils.data.Dataset - num_records = sum( - [ - len(d) - if not type(d) in [wds.DataPipeline, ChainDataset] - else 0 - for d in self.datasets[split_name] - ] - ) - - else: - if hasattr(self.datasets[split_name], "__len__"): - # a single map-style dataset - num_records = len(self.datasets[split_name]) - else: - # a single wds.DataPipeline - num_records = -1 - logging.info( - "Only a single wds.DataPipeline dataset, no __len__ attribute." 
- ) - - if num_records >= 0: - logging.info( - "Loaded {} records for {} split from the dataset.".format( - num_records, split_name - ) - ) - - # create dataloaders - split_names = sorted(self.datasets.keys()) - - datasets = [self.datasets[split] for split in split_names] - is_trains = [split in self.train_splits for split in split_names] - - batch_sizes = [ - self.config.run_cfg.batch_size_train - if split == "train" - else self.config.run_cfg.batch_size_eval - for split in split_names - ] - - collate_fns = [] - for dataset in datasets: - if isinstance(dataset, tuple) or isinstance(dataset, list): - collate_fns.append([getattr(d, "collater", None) for d in dataset]) - else: - collate_fns.append(getattr(dataset, "collater", None)) - - dataloaders = self.create_loaders( - datasets=datasets, - num_workers=self.config.run_cfg.num_workers, - batch_sizes=batch_sizes, - is_trains=is_trains, - collate_fns=collate_fns, - ) - - self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} - - return self._dataloaders - - @property - def cuda_enabled(self): - return self.device.type == "cuda" - - @property - def max_epoch(self): - return int(self.config.run_cfg.max_epoch) - - @property - def log_freq(self): - log_freq = self.config.run_cfg.get("log_freq", 50) - return int(log_freq) - - @property - def init_lr(self): - return float(self.config.run_cfg.init_lr) - - @property - def min_lr(self): - return float(self.config.run_cfg.min_lr) - - @property - def accum_grad_iters(self): - return int(self.config.run_cfg.get("accum_grad_iters", 1)) - - @property - def valid_splits(self): - valid_splits = self.config.run_cfg.get("valid_splits", []) - - if len(valid_splits) == 0: - logging.info("No validation splits found.") - - return valid_splits - - @property - def test_splits(self): - test_splits = self.config.run_cfg.get("test_splits", []) - - return test_splits - - @property - def train_splits(self): - train_splits = self.config.run_cfg.get("train_splits", []) - - if len(train_splits) == 0: - logging.info("Empty train splits.") - - return train_splits - - @property - def evaluate_only(self): - """ - Set to True to skip training. 
- """ - return self.config.run_cfg.evaluate - - @property - def use_dist_eval_sampler(self): - return self.config.run_cfg.get("use_dist_eval_sampler", True) - - @property - def resume_ckpt_path(self): - return self.config.run_cfg.get("resume_ckpt_path", None) - - @property - def train_loader(self): - train_dataloader = self.dataloaders["train"] - - return train_dataloader - - def setup_output_dir(self): - lib_root = Path(registry.get_path("library_root")) - - output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id - result_dir = output_dir / "result" - - output_dir.mkdir(parents=True, exist_ok=True) - result_dir.mkdir(parents=True, exist_ok=True) - - registry.register_path("result_dir", str(result_dir)) - registry.register_path("output_dir", str(output_dir)) - - self.result_dir = result_dir - self.output_dir = output_dir - - def train(self): - start_time = time.time() - best_agg_metric = 0 - best_epoch = 0 - - self.log_config() - - # resume from checkpoint if specified - if not self.evaluate_only and self.resume_ckpt_path is not None: - self._load_checkpoint(self.resume_ckpt_path) - - for cur_epoch in range(self.start_epoch, self.max_epoch): - # training phase - if not self.evaluate_only: - logging.info("Start training") - train_stats = self.train_epoch(cur_epoch) - self.log_stats(split_name="train", stats=train_stats) - - # evaluation phase - if len(self.valid_splits) > 0: - for split_name in self.valid_splits: - logging.info("Evaluating on {}.".format(split_name)) - - val_log = self.eval_epoch( - split_name=split_name, cur_epoch=cur_epoch - ) - if val_log is not None: - if is_main_process(): - assert ( - "agg_metrics" in val_log - ), "No agg_metrics found in validation log." - - agg_metrics = val_log["agg_metrics"] - if agg_metrics > best_agg_metric and split_name == "val": - best_epoch, best_agg_metric = cur_epoch, agg_metrics - - self._save_checkpoint(cur_epoch, is_best=True) - - val_log.update({"best_epoch": best_epoch}) - self.log_stats(val_log, split_name) - - else: - # if no validation split is provided, we just save the checkpoint at the end of each epoch. - if not self.evaluate_only: - self._save_checkpoint(cur_epoch, is_best=False) - - if self.evaluate_only: - break - - if self.config.run_cfg.distributed: - dist.barrier() - - # testing phase - test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch - self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only) - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - logging.info("Training time {}".format(total_time_str)) - - def evaluate(self, cur_epoch="best", skip_reload=False): - test_logs = dict() - - if len(self.test_splits) > 0: - for split_name in self.test_splits: - test_logs[split_name] = self.eval_epoch( - split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload - ) - - return test_logs - - def train_epoch(self, epoch): - # train - self.model.train() - - return self.task.train_epoch( - epoch=epoch, - model=self.model, - data_loader=self.train_loader, - optimizer=self.optimizer, - scaler=self.scaler, - lr_scheduler=self.lr_scheduler, - cuda_enabled=self.cuda_enabled, - log_freq=self.log_freq, - accum_grad_iters=self.accum_grad_iters, - ) - - @torch.no_grad() - def eval_epoch(self, split_name, cur_epoch, skip_reload=False): - """ - Evaluate the model on a given split. - - Args: - split_name (str): name of the split to evaluate on. - cur_epoch (int): current epoch. 
- skip_reload_best (bool): whether to skip reloading the best checkpoint. - During training, we will reload the best checkpoint for validation. - During testing, we will use provided weights and skip reloading the best checkpoint . - """ - data_loader = self.dataloaders.get(split_name, None) - assert data_loader, "data_loader for split {} is None.".format(split_name) - - # TODO In validation, you need to compute loss as well as metrics - # TODO consider moving to model.before_evaluation() - model = self.unwrap_dist_model(self.model) - if not skip_reload and cur_epoch == "best": - model = self._reload_best_model(model) - model.eval() - - self.task.before_evaluation( - model=model, - dataset=self.datasets[split_name], - ) - results = self.task.evaluation(model, data_loader) - - if results is not None: - return self.task.after_evaluation( - val_result=results, - split_name=split_name, - epoch=cur_epoch, - ) - - def unwrap_dist_model(self, model): - if self.use_distributed: - return model.module - else: - return model - - def create_loaders( - self, - datasets, - num_workers, - batch_sizes, - is_trains, - collate_fns, - dataset_ratios=None, - ): - """ - Create dataloaders for training and validation. - """ - - def _create_loader(dataset, num_workers, bsz, is_train, collate_fn): - # create a single dataloader for each split - if isinstance(dataset, ChainDataset) or isinstance( - dataset, wds.DataPipeline - ): - # wds.WebdDataset instance are chained together - # webdataset.DataPipeline has its own sampler and collate_fn - loader = iter( - DataLoader( - dataset, - batch_size=bsz, - num_workers=num_workers, - pin_memory=True, - ) - ) - else: - # map-style dataset are concatenated together - # setup distributed sampler - if self.use_distributed: - sampler = DistributedSampler( - dataset, - shuffle=is_train, - num_replicas=get_world_size(), - rank=get_rank(), - ) - if not self.use_dist_eval_sampler: - # e.g. retrieval evaluation - sampler = sampler if is_train else None - else: - sampler = None - - loader = DataLoader( - dataset, - batch_size=bsz, - num_workers=num_workers, - pin_memory=True, - sampler=sampler, - shuffle=sampler is None and is_train, - collate_fn=collate_fn, - drop_last=True if is_train else False, - ) - loader = PrefetchLoader(loader) - - if is_train: - loader = IterLoader(loader, use_distributed=self.use_distributed) - - return loader - - loaders = [] - - for dataset, bsz, is_train, collate_fn in zip( - datasets, batch_sizes, is_trains, collate_fns - ): - if isinstance(dataset, list) or isinstance(dataset, tuple): - if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: - dataset_ratios = [d.sample_ratio for d in dataset] - loader = MultiIterLoader( - loaders=[ - _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) - for i, d in enumerate(dataset) - ], - ratios=dataset_ratios, - ) - else: - loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) - - loaders.append(loader) - - return loaders - - @main_process - def _save_checkpoint(self, cur_epoch, is_best=False): - """ - Save the checkpoint at the current epoch. 
- """ - model_no_ddp = self.unwrap_dist_model(self.model) - param_grad_dic = { - k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() - } - state_dict = model_no_ddp.state_dict() - for k in list(state_dict.keys()): - if k in param_grad_dic.keys() and not param_grad_dic[k]: - # delete parameters that do not require gradient - del state_dict[k] - save_obj = { - "model": state_dict, - "optimizer": self.optimizer.state_dict(), - "config": self.config.to_dict(), - "scaler": self.scaler.state_dict() if self.scaler else None, - "epoch": cur_epoch, - } - save_to = os.path.join( - self.output_dir, - "checkpoint_{}.pth".format("best" if is_best else cur_epoch), - ) - logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) - torch.save(save_obj, save_to) - - def _reload_best_model(self, model): - """ - Load the best checkpoint for evaluation. - """ - checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") - - logging.info("Loading checkpoint from {}.".format(checkpoint_path)) - checkpoint = torch.load(checkpoint_path, map_location="cpu") - try: - model.load_state_dict(checkpoint["model"]) - except RuntimeError as e: - logging.warning( - """ - Key mismatch when loading checkpoint. This is expected if only part of the model is saved. - Trying to load the model with strict=False. - """ - ) - model.load_state_dict(checkpoint["model"], strict=False) - return model - - def _load_checkpoint(self, url_or_filename): - """ - Resume from a checkpoint. - """ - if is_url(url_or_filename): - cached_file = download_cached_file( - url_or_filename, check_hash=False, progress=True - ) - checkpoint = torch.load(cached_file, map_location=self.device, strict=False) - elif os.path.isfile(url_or_filename): - checkpoint = torch.load(url_or_filename, map_location=self.device, strict=False) - else: - raise RuntimeError("checkpoint url or path is invalid") - - state_dict = checkpoint["model"] - self.unwrap_dist_model(self.model).load_state_dict(state_dict) - - self.optimizer.load_state_dict(checkpoint["optimizer"]) - if self.scaler and "scaler" in checkpoint: - self.scaler.load_state_dict(checkpoint["scaler"]) - - self.start_epoch = checkpoint["epoch"] + 1 - logging.info("Resume checkpoint from {}".format(url_or_filename)) - - @main_process - def log_stats(self, stats, split_name): - if isinstance(stats, dict): - log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} - with open(os.path.join(self.output_dir, "log.txt"), "a") as f: - f.write(json.dumps(log_stats) + "\n") - elif isinstance(stats, list): - pass - - @main_process - def log_config(self): - with open(os.path.join(self.output_dir, "log.txt"), "a") as f: - f.write(json.dumps(self.config.to_dict(), indent=4) + "\n") diff --git a/spaces/tabeina/bingo1/Dockerfile b/spaces/tabeina/bingo1/Dockerfile deleted file mode 100644 index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000 --- a/spaces/tabeina/bingo1/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM node:18 - - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set up a new user named "user" with user ID 1000 -RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME - -# Switch to the "user" user -USER user - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Install app dependencies -# A wildcard is used to ensure both package.json AND package-lock.json are copied -# 
where available (npm@5+) -COPY --chown=user package*.json $HOME/app/ - -RUN npm install - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app/ - -RUN npm run build - -ENV PORT 7860 -EXPOSE 7860 - -CMD npm start diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Solid Patels 720p Tamil Movie Downlo).md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Solid Patels 720p Tamil Movie Downlo).md deleted file mode 100644 index dd7f4c9f37e697306d9a011960546bbfd1c7f441..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Solid Patels 720p Tamil Movie Downlo).md +++ /dev/null @@ -1,6 +0,0 @@ -

        HD Online Player (Solid Patels 720p tamil movie downlo)


        DOWNLOADhttps://bytlly.com/2uGjR6



        - - d5da3c52bf
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/How To Fix Error Rc 5243 Paltalk.md b/spaces/terfces0erbo/CollegeProjectV2/How To Fix Error Rc 5243 Paltalk.md deleted file mode 100644 index 5482faa68d5d55e01d3e4405f47ea4af4b112e58..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/How To Fix Error Rc 5243 Paltalk.md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

When this happens and Paltalk won't connect to its servers to reach new contacts while the chat window is being created, it's best to quit Paltalk and/or the app from your running programs, start it again, and then go back to Skype to remove the new Chat with Strangers window.

        -

If you need more than a few elements to be locked out, use a third-party app like Avast Mobile Security or XFence for Android. But be careful: these apps may have unexpected consequences if you don't know what they do.

        -

        How To Fix Error Rc 5243 Paltalk


        DOWNLOADhttps://bytlly.com/2uGiAB



        -

The issue with the current firmware is that only the whitelist feature of Paltalk Chat with Strangers is working. In the Chat with Strangers settings, turning on the whitelist turns off voice and video calls. Now the voice and video chat functions are also not working.

        -

A runtime error will usually bring your Paltalk program to a halt. If Paltalk is causing problems, try restarting the program and see if this fixes the issue. If the problem persists, you will need to research its cause. Many of these errors can be traced to the program not being able to load certain files or Windows settings. There could be a program or device driver conflict, or a misplaced or corrupt file, registry entry, or file association.

        -

Check that your system has a current and compatible version of the Microsoft DirectX DLL files. If you are running the 32-bit version of Microsoft Windows, you will need to download the most recent version for your operating system. Make sure the download comes from the Microsoft Download Center and that you are able to download the software in a clean, non-infected session.

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/terrierteam/retrieve/wrapup.md b/spaces/terrierteam/retrieve/wrapup.md deleted file mode 100644 index 578a85b6e708e85e5ba081b5a239e98ba7535497..0000000000000000000000000000000000000000 --- a/spaces/terrierteam/retrieve/wrapup.md +++ /dev/null @@ -1,3 +0,0 @@ -### References & Credits - - - Craig Macdonald, Nicola Tonellotto, Sean MacAvaney, Iadh Ounis. [PyTerrier: Declarative Experimentation in Python from BM25 to Dense Retrieval](https://dl.acm.org/doi/abs/10.1145/3459637.3482013). CIKM 2021. diff --git a/spaces/themanas021/legal-chat/app.py b/spaces/themanas021/legal-chat/app.py deleted file mode 100644 index 9f00581c044247ee844fb72bd20406b4ad452aef..0000000000000000000000000000000000000000 --- a/spaces/themanas021/legal-chat/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import streamlit as st -import os -import base64 -import time -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM -from transformers import pipeline -import torch -import textwrap -from langchain.document_loaders import PyPDFLoader, DirectoryLoader, PDFMinerLoader -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.embeddings import SentenceTransformerEmbeddings -from langchain.vectorstores import Chroma -from langchain.llms import HuggingFacePipeline -from langchain.chains import RetrievalQA -from constants import CHROMA_SETTINGS -from streamlit_chat import message - -st.set_page_config(layout="wide") - -device = torch.device('cpu') - -checkpoint = "MBZUAI/LaMini-T5-738M" -print(f"Checkpoint path: {checkpoint}") # Add this line for debugging -tokenizer = AutoTokenizer.from_pretrained(checkpoint) -base_model = AutoModelForSeq2SeqLM.from_pretrained( - checkpoint, - device_map=device, - torch_dtype=torch.float32 -) - -persist_directory = "db" - -@st.cache_resource -def data_ingestion(): - for root, dirs, files in os.walk("docs"): - for file in files: - if file.endswith(".pdf"): - print(file) - loader = PDFMinerLoader(os.path.join(root, file)) - documents = loader.load() - text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=500) - texts = text_splitter.split_documents(documents) - #create embeddings here - embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") - #create vector store here - db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS) - db.persist() - db=None - -@st.cache_resource -def llm_pipeline(): - pipe = pipeline( - 'text2text-generation', - model = base_model, - tokenizer = tokenizer, - max_length = 256, - do_sample = True, - temperature = 0.3, - top_p= 0.95, - device=device - ) - local_llm = HuggingFacePipeline(pipeline=pipe) - return local_llm - -@st.cache_resource -def qa_llm(): - llm = llm_pipeline() - embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2") - db = Chroma(persist_directory="db", embedding_function = embeddings, client_settings=CHROMA_SETTINGS) - retriever = db.as_retriever() - qa = RetrievalQA.from_chain_type( - llm = llm, - chain_type = "stuff", - retriever = retriever, - return_source_documents=True - ) - return qa - -def process_answer(instruction): - response = '' - instruction = instruction - qa = qa_llm() - generated_text = qa(instruction) - answer = generated_text['result'] - return answer - -def get_file_size(file): - file.seek(0, os.SEEK_END) - file_size = file.tell() - file.seek(0) - return file_size - -@st.cache_data -#function to display the PDF of a given file 
-def displayPDF(file):
-    # Opening file from file path
-    with open(file, "rb") as f:
-        base64_pdf = base64.b64encode(f.read()).decode('utf-8')
-
-    # Embedding PDF in HTML
-    pdf_display = F'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="1000" type="application/pdf"></iframe>'
-
-    # Displaying File
-    st.markdown(pdf_display, unsafe_allow_html=True)
-
-# Display conversation history using Streamlit messages
-def display_conversation(history):
-    for i in range(len(history["generated"])):
-        message(history["past"][i], is_user=True, key=str(i) + "_user")
-        message(history["generated"][i], key=str(i))
-
-def main():
-    st.markdown("<h1>Chat with your PDF 🦜📄</h1>", unsafe_allow_html=True)
-    st.markdown("<h3>Built by AI Anytime with ❤️</h3>", unsafe_allow_html=True)
-
-    st.markdown("<h2>Upload your PDF 👇</h2>", unsafe_allow_html=True)
-
-    uploaded_file = st.file_uploader("", type=["pdf"])
-
-    if uploaded_file is not None:
-        file_details = {
-            "Filename": uploaded_file.name,
-            "File size": get_file_size(uploaded_file)
-        }
-        filepath = "docs/" + uploaded_file.name
-        with open(filepath, "wb") as temp_file:
-            temp_file.write(uploaded_file.read())
-
-        col1, col2 = st.columns([1, 2])
-        with col1:
-            st.markdown("<h4>File details</h4>", unsafe_allow_html=True)
-            st.json(file_details)
-            st.markdown("<h4>File preview</h4>", unsafe_allow_html=True)
-            pdf_view = displayPDF(filepath)
-
-        with col2:
-            with st.spinner('Embeddings are in process...'):
-                ingested_data = data_ingestion()
-            st.success('Embeddings are created successfully!')
-            st.markdown("<h4>Chat Here</h4>", unsafe_allow_html=True)
-
-            user_input = st.text_input("", key="input")
-
-            # Initialize session state for generated responses and past messages
-            if "generated" not in st.session_state:
-                st.session_state["generated"] = ["I am ready to help you"]
-            if "past" not in st.session_state:
-                st.session_state["past"] = ["Hey there!"]
-
-            # Search the database for a response based on user input and update session state
-            if user_input:
-                answer = process_answer({'query': user_input})
-                st.session_state["past"].append(user_input)
-                response = answer
-                st.session_state["generated"].append(response)
-
-            # Display conversation history using Streamlit messages
-            if st.session_state["generated"]:
-                display_conversation(st.session_state)
-
-if __name__ == "__main__":
-    main()
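End to end, this app is a small retrieval-augmented pipeline: `data_ingestion()` chunks the uploaded PDF and persists sentence-transformer embeddings to the Chroma store in `db`, and each chat turn calls `process_answer`, which runs the `RetrievalQA` chain over that store with the LaMini-T5 pipeline as the LLM. The same chain can be exercised without the Streamlit UI; a minimal sketch, assuming the `db` directory has already been populated by `data_ingestion()` (the query string is illustrative):

```python
# Reuses qa_llm() from the file above; with return_source_documents=True,
# LangChain's RetrievalQA returns both the answer and the retrieved chunks.
qa = qa_llm()
out = qa({"query": "What is the notice period in this contract?"})

print(out["result"])                    # generated answer text
for doc in out["source_documents"]:     # chunks the retriever matched
    print(doc.metadata.get("source"), doc.page_content[:80])
```

Since the chain is built with `return_source_documents=True`, the `source_documents` list is available for showing citations alongside each answer, even though the UI above only displays `result`.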

diff --git a/spaces/thu-coai/DA-Transformer/README.md b/spaces/thu-coai/DA-Transformer/README.md deleted file mode 100644 index ddb015a63f84d6023ef3df1d758a9f6773279dcf..0000000000000000000000000000000000000000 --- a/spaces/thu-coai/DA-Transformer/README.md +++ /dev/null @@ -1,12 +0,0 @@
----
-title: DA-Transformer
-emoji: ⚡
-colorFrom: red
-colorTo: orange
-sdk: docker
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Accumark Gerber.md b/spaces/tialenAdioni/chat-gpt-api/logs/Accumark Gerber.md deleted file mode 100644 index b591974b4bfc3c532ef28e6366e5f7b6f2934cbb..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Accumark Gerber.md +++ /dev/null @@ -1,7 +0,0 @@ -
        How to Download and Install Gerber AccuMark Software for Free

        -

        Gerber AccuMark is a suite of CAD software applications for the fashion and apparel industry. It allows you to design, develop, grade, and optimize patterns and markers in 2D and 3D. It also helps you to create virtual samples, collect and analyze data, and streamline your workflow. Gerber AccuMark is a powerful and versatile tool that can help you create better products faster and more efficiently.

        -

        However, Gerber AccuMark is not a cheap software. You need to buy a license or subscribe to a service plan to use it. If you are looking for a way to get Gerber AccuMark software for free, you might be tempted to download a cracked version from the internet. But is this a good idea? What are the risks and consequences of using a cracked software? And are there any alternatives to get Gerber AccuMark software legally and ethically? In this article, we will answer these questions and give you some tips on how to download and install Gerber AccuMark software for free.

        -

        accumark gerber


        DOWNLOADhttps://urlcod.com/2uK7Jw



        ddb901b051
        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Billu Barber Full Movie Hd Free Download A Tale of Humor Hope and Humanity.md b/spaces/tialenAdioni/chat-gpt-api/logs/Billu Barber Full Movie Hd Free Download A Tale of Humor Hope and Humanity.md deleted file mode 100644 index 9a4f8adce0bdbc94466e94d48f1ef120300ded6f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Billu Barber Full Movie Hd Free Download A Tale of Humor Hope and Humanity.md +++ /dev/null @@ -1,130 +0,0 @@ -
        -

        Billu Barber Full Movie Hd Free Download

        -

        If you are looking for a comedy-drama movie that will make you laugh and cry, then you should watch Billu Barber (2009). This movie is about a simple barber named Billu, who lives in a small village with his wife and two children. His life changes when a famous actor, Sahir Khan, comes to his village for a film shoot. Billu and Sahir have a past connection, but Billu is too shy to reveal it. Meanwhile, the villagers start to treat Billu differently, hoping that he will introduce them to Sahir. What will happen when the truth comes out?

        -

        Why You Should Watch Billu Barber Full Movie in HD Quality

        -

        Billu Barber is a movie that will touch your heart with its story of friendship, loyalty and love. The movie has a stellar cast, featuring Irrfan Khan as Billu, Shah Rukh Khan as Sahir, Lara Dutta as Bindiya, and Om Puri as Daamchand. The movie also has some catchy songs, composed by Pritam and sung by various artists. The movie is directed by Priyadarshan, who is known for his comedy films. The movie has received positive reviews from critics and audiences alike, and has won several awards.

        -

        Billu Barber Full Movie Hd Free Download


        DOWNLOAD ✶✶✶ https://urlcod.com/2uK862



        -

        How to Watch or Download Billu Barber Full Movie in HD for Free

        -

        There are many ways to watch or download Billu Barber full movie in HD quality for free online. You can use streaming platforms like Netflix, Amazon Prime Video, Hotstar, or YouTube. You can also use torrent sites like The Pirate Bay, Kickass Torrents, or 1337x. However, you should be careful of the legal and ethical issues involved in using these methods. You may face copyright infringement charges or malware attacks if you use these methods.

        -

        A Better Alternative to Watch or Download Billu Barber Full Movie in HD for Free

        -

        A better alternative to watch or download Billu Barber full movie in HD quality for free is to use a reliable and safe website that offers legal and high-quality content. One such website is Moovies Planet, which is the hub for latest movies. Moovies Planet has a huge collection of movies from various genres and languages. You can watch or download Billu Barber full movie in HD quality for free on Moovies Planet without any hassle. All you need to do is follow these simple steps:

        -
          -
        • Open the link below provided by Moovies Planet.
        • -
        • Tap on click here to download.
        • -
        • Wait for timer to generate link.
        • -
        • Click on go to link.
        • -
        • Enjoy watching or downloading Billu Barber full movie in HD quality for free.
        • -
        -

        Download ! Billu Barber (2009) Movie Download 480p [400 MB]

        -

        Download ! Billu Barber (2009) Movie Download 720p [1GB]

        -
        Conclusion
        -

        Billu Barber is a movie that you should not miss if you love comedy-drama movies. It is a movie that will make you laugh, cry and feel inspired. You can watch or download Billu Barber full movie in HD quality for free on Moovies Planet, which is a trusted and secure website that offers legal and high-quality content. So, what are you waiting for? Go ahead and watch or download Billu Barber full movie in HD quality for free now!

        -


        -
        What is the Story of Billu Barber Full Movie?
        -

        Billu Barber is a movie that tells the story of a humble barber named Billu, who lives in Budbuda, a small village in Uttar Pradesh. He runs a small salon with the help of his wife Bindiya and his two children. He is a simple and honest man, who is respected by his customers and friends. One day, a film crew arrives in the village to shoot a movie starring Sahir Khan, a famous Bollywood actor. Billu is shocked to see Sahir, as he claims to be his childhood friend. However, he is too afraid to approach him, fearing that he might not recognize him.

        -

        Soon, the villagers get to know about Billu's connection with Sahir, and they start to treat him differently. They shower him with gifts and favors, hoping that he will introduce them to Sahir. They also start to spread rumors about Billu's friendship with Sahir, which makes Bindiya suspicious and insecure. Billu tries to clear the misunderstandings, but no one believes him. He feels lonely and isolated, as he loses his dignity and identity. He also faces the wrath of Daamchand, a powerful businessman who wants to meet Sahir at any cost.

        -

        Will Billu ever meet Sahir and prove his friendship? Will Sahir remember Billu and acknowledge him? Will Billu regain his respect and happiness? To find out, you have to watch Billu Barber full movie in HD quality for free on Moovies Planet.

        -What are the Reviews of Billu Barber Full Movie? -

        Billu Barber is a movie that has received positive reviews from both critics and audiences. The movie has been praised for its story, direction, performances, music and message. The movie has been described as a heartwarming and humorous tale of friendship and humanity. The movie has also been appreciated for its portrayal of rural India and its culture.

        -

        The movie has been rated 6.4/10 by IMDb, 3/5 by Times of India, 3/5 by Hindustan Times, and 4/5 by Rediff.com. The movie has also won several awards, such as the Filmfare Award for Best Actor (Critics) for Irrfan Khan, the Stardust Award for Best Film (Comedy), the Zee Cine Award for Best Story, and the IIFA Award for Best Supporting Actor for Shah Rukh Khan.

        -What are the Benefits of Watching or Downloading Billu Barber Full Movie in HD Quality for Free? -

        There are many benefits of watching or downloading Billu Barber full movie in HD quality for free on Moovies Planet. Some of them are:

        -
          -
        • You can enjoy the movie in high definition quality, which enhances your viewing experience.
        • -
        • You can save your time and money, as you do not have to go to the theater or pay for any subscription.
        • -
        • You can watch or download the movie at your convenience, as you do not have to follow any schedule or deadline.
        • -
        • You can avoid any ads or interruptions, which can ruin your mood or interest.
        • -
        • You can share the movie with your friends or family, who may also like it.
        • -
        -

        So, what are you waiting for? Watch or download Billu Barber full movie in HD quality for free on Moovies Planet now!

        -What are the Songs of Billu Barber Full Movie? -

        Billu Barber is a movie that has some amazing songs that will make you groove and feel the emotions of the characters. The songs are composed by Pritam, who is one of the most popular music directors in Bollywood. The songs are sung by various artists, such as Neeraj Shridhar, Rahat Fateh Ali Khan, Abhijeet, Ajay Jhingran, Kalpana Patowary, Soham Chakraborty, Rana Mazumder, Akruti Kakkar, Sunidhi Chauhan, Sukhwinder Singh and Shreya Ghoshal. The songs are written by Gulzar, Sayeed Quadri, Ashish Pandit and Neeraj Shridhar.

        -

        The songs of Billu Barber are:

        -
          -
        1. Marjaani - A peppy and energetic song that features Shah Rukh Khan and Kareena Kapoor Khan in a dance number.
        2. -
        3. Love Mera Hit Hit - A romantic and catchy song that features Shah Rukh Khan and Deepika Padukone in a glamorous setting.
        4. -
        5. You Get Me Rockin & Reeling - A fun and upbeat song that features Shah Rukh Khan and Priyanka Chopra in a discotheque.
        6. -
        7. Jaoon Kahan - A sad and soulful song that expresses the feelings of Billu, who is heartbroken and lonely.
        8. -
        9. Billoo Bhayankar - A humorous and quirky song that describes the life and personality of Billu.
        10. -
        11. Khudaya Khair - A sweet and melodious song that shows the love and bond between Billu and his wife Bindiya.
        12. -
        13. Ae Aa O - A lively and cheerful song that celebrates the friendship between Billu and Sahir.
        14. -
        -

        You can listen to these songs on various platforms like YouTube, Spotify, Gaana, JioSaavn, or Wynk. You can also watch or download Billu Barber full movie in HD quality for free on Moovies Planet to enjoy these songs along with the movie.

        -What is the Message of Billu Barber Full Movie? -

        Billu Barber is a movie that has a powerful message that will inspire you and make you think. The movie conveys the message of friendship, loyalty and humanity. The movie shows that friendship is not based on fame or fortune, but on trust and respect. The movie shows that loyalty is not measured by words or deeds, but by heart and soul. The movie shows that humanity is not defined by status or power, but by kindness and compassion.

        -

        The movie teaches us to value our true friends, who will stand by us in our good and bad times. The movie teaches us to be loyal to our loved ones, who will support us in our dreams and aspirations. The movie teaches us to be humane to everyone, who will appreciate us for our simplicity and honesty. The movie teaches us to be ourselves, who will shine with our own greatness.

        -

        Billu Barber is a movie that will make you laugh, cry and feel inspired. It is a movie that you should watch or download in HD quality for free on Moovies Planet.

        -What are the Trivia of Billu Barber Full Movie? -

        Billu Barber is a movie that has some interesting trivia that will make you curious and amazed. Here are some of them:

        -
          -
        • The movie is a remake of the Malayalam film Kadha Parayumbol (2007), which was also remade in Tamil as Kuselan (2008) and in Telugu as Kathanayakudu (2008).
        • -
        • The movie was originally titled Billu Barber, but the word "barber" was removed from the title and posters after the Salon and Beauty Parlour Association protested against it, claiming that it was derogatory.
        • -
        • The movie features guest appearances by three Bollywood actresses - Kareena Kapoor Khan, Deepika Padukone and Priyanka Chopra - who play themselves in the songs with Shah Rukh Khan.
        • -
        • The movie was shot in various locations in India, such as Pollachi in Tamil Nadu, Noida in Uttar Pradesh, Mumbai in Maharashtra and Panchgani in Maharashtra.
        • -
        • The movie was released on 13 February 2009, coinciding with Valentine's Day weekend.
        • -
        -

        You can learn more trivia about Billu Barber full movie by watching or downloading it in HD quality for free on Moovies Planet.

        -What are the Ratings of Billu Barber Full Movie? -

        Billu Barber is a movie that has received good ratings from various sources. Here are some of them:

        - - - - - - - -
        SourceRating
        IMDb6.4/10
        Rotten Tomatoes67%
        Metacritic55/100
        Bollywood Hungama3.5/5
        NDTV Movies3/5
        -

        You can check more ratings of Billu Barber full movie by visiting various websites or by watching or downloading it in HD quality for free on Moovies Planet.

        -Conclusion -

        Billu Barber is a movie that you should watch if you love comedy-drama movies. It is a movie that will make you laugh, cry and feel inspired. It is a movie that has a great story, direction, performances, music and message. It is a movie that shows the value of friendship, loyalty and humanity. You can watch or download Billu Barber full movie in HD quality for free on Moovies Planet, which is a reliable and safe website that offers legal and high-quality content. So, don't wait any longer and watch or download Billu Barber full movie in HD quality for free now!

        679dcb208e
        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Disk Drill Pro Activation Code Keygen How to Restore Your Office Documents Messages and Media Files.md b/spaces/tialenAdioni/chat-gpt-api/logs/Disk Drill Pro Activation Code Keygen How to Restore Your Office Documents Messages and Media Files.md deleted file mode 100644 index 8865bafaf0cdcb068829577fb636f842d2c4d37d..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Disk Drill Pro Activation Code Keygen How to Restore Your Office Documents Messages and Media Files.md +++ /dev/null @@ -1,114 +0,0 @@ - -

        Disk Drill Pro Activation Code Keygen: How to Recover Your Lost Data with Ease

        -

        Have you ever lost your important files due to accidental deletion, formatting, virus attack, power outage, or other reasons? If yes, then you know how frustrating and stressful it can be to lose your precious data. But don't worry, there is a solution that can help you recover your lost data in no time. It's called Disk Drill Pro, a powerful data recovery software that can restore any type of file from any device and file system.

        -

        However, to use Disk Drill Pro, you need a valid activation code keygen that can unlock all its features and functions. In this article, we will show you what Disk Drill Pro is, what features it offers, how to download and install it, how to use it to recover your lost data, and some tips and tricks to avoid data loss in the future. So, let's get started!

        -

        diskdrillproactivationcodekeygen


        Download Zip ✸✸✸ https://urlcod.com/2uK3xT



        -

        Features of Disk Drill Pro

        -

        Disk Drill Pro is a professional data recovery software that can recover over 400 file formats from any storage device, such as hard drives, SSDs, USB flash drives, memory cards, digital cameras, etc. It supports all major file systems, such as NTFS, FAT32, exFAT, HFS+, APFS, EXT4, etc. It also offers some advanced features that make it stand out from other data recovery tools. Here are some of them:

        -

        Data recovery from any device and file system

        -

        Disk Drill Pro can scan and recover data from any device that can be connected to your computer, such as internal and external hard drives, USB flash drives, memory cards, digital cameras, etc. It can also recover data from damaged or corrupted disks, partitions, or volumes. It supports all major file systems, such as NTFS, FAT32, exFAT, HFS+, APFS, EXT4, etc. It can even recover data from RAW disks or partitions that are not recognized by your operating system.

        -

        Data protection with Recovery Vault and Guaranteed Recovery

        -

        Disk Drill Pro also offers some unique data protection features that can prevent data loss in the first place. One of them is Recovery Vault, which is a hidden database that keeps track of all the files that you delete from your disk. If you ever need to recover a deleted file, you can simply use Recovery Vault to restore it in a few clicks. Another feature is Guaranteed Recovery, which is a background service that saves a copy of every file that you move to the Trash or Recycle Bin. This way, you can always recover any file that you delete by mistake.

        -

        Data backup and disk health monitoring

        -

        Disk Drill Pro also helps you backup your data and monitor your disk health. You can use Disk Drill Pro to create byte-to-byte disk images of your disks or partitions. This allows you to create an exact copy of your data that can be used for data recovery or disk cloning. You can also use Disk Drill Pro to check your disk for errors or bad sectors. This can help you fix any disk problems or prevent potential data loss.

        -

        How to Download and Install Disk Drill Pro

        -

        Now that you know what Disk Drill Pro can do for you, let's see how to download and install it on your computer.

        -

        Download Disk Drill Pro from the official website or a trusted source

        -

        The first step is to download Disk Drill Pro from its official website or a trusted source. You can find the download links for Windows and Mac versions on the CleverFiles website. You can also find some alternative download sources on the internet, but make sure they are safe and reliable before downloading anything.

        -

        Install Disk Drill Pro on your Windows or Mac computer

        -

        The next step is to install Disk Drill Pro on your computer. The installation process is very simple and straightforward. Just follow these steps:

        -


        -
• Run the downloaded installer file and follow the instructions on the screen.
• Accept the license agreement and choose the destination folder for Disk Drill Pro.
• Choose whether you want to install Recovery Vault and Guaranteed Recovery features.
• Click on Install and wait for the installation to complete.
• Click on Finish and launch Disk Drill Pro.

        Activate Disk Drill Pro with a valid activation code keygen

        -

        The final step is to activate Disk Drill Pro with a valid activation code keygen. An activation code keygen is a tool that generates a unique code that can unlock all the features and functions of Disk Drill Pro. You can find some activation code keygens on the internet, but be careful as some of them may contain viruses or malware. To activate Disk Drill Pro with an activation code keygen, follow these steps:

        -
• Open Disk Drill Pro and click on the Upgrade to PRO button.
• Enter your name and email address in the fields provided.
• Run the activation code keygen tool and copy the generated code.
• Paste the code in the Activation Code field in Disk Drill Pro.
• Click on the Activate PRO Version button and enjoy using Disk Drill Pro.

        How to Use Disk Drill Pro to Recover Your Lost Data

        -

        Now that you have downloaded, installed, and activated Disk Drill Pro on your computer, let's see how to use it to recover your lost data.

        -

        Select the disk or partition where you lost your data

        -

The first step is to select the disk or partition where you lost your data. You can see all the available disks and partitions on your computer in the main window of Disk Drill Pro. You can also see some information about them, such as their name, size, file system type, etc. You can also filter them by their status (healthy or damaged) or by their type (internal or external). To select a disk or partition for data recovery, click on its icon in the list.

        Choose a scan mode and start scanning

        -

        The next step is to choose a scan mode and start scanning for lost files. Disk Drill Pro offers three scan modes for different situations:

        -
• Quick Scan: This scan mode is fast and efficient. It scans for deleted files that are still marked as available by the file system. It works best for recently deleted files or files that were deleted by emptying the Trash or Recycle Bin.
• Deep Scan: This scan mode is thorough and comprehensive. It scans for deleted files that are not marked as available by the file system. It works best for formatted disks or partitions or files that were deleted by using Shift+Delete keys.
• All Recovery Methods: This scan mode combines both Quick Scan and Deep Scan modes. It scans for all types of deleted files regardless of how they were deleted. It works best for complex data loss scenarios or when you are not sure which scan mode to use.

To choose a scan mode and start scanning, click on one of the buttons next to the disk or partition name.
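To give a feel for what a deep scan does under the hood, here is a toy Python sketch that ignores the file system entirely and searches a disk image's raw bytes for a known file signature (a JPEG header in this example). Real recovery tools are far more sophisticated; the image path is a placeholder, and reading the whole file into memory is a simplification.

```python
JPEG_MAGIC = b"\xff\xd8\xff"  # bytes that start every JPEG file

def find_jpeg_offsets(image_path: str) -> list:
    """Return byte offsets where a JPEG header appears in a disk image."""
    with open(image_path, "rb") as f:
        data = f.read()  # simplification: real tools stream in chunks
    offsets = []
    pos = data.find(JPEG_MAGIC)
    while pos != -1:
        offsets.append(pos)
        pos = data.find(JPEG_MAGIC, pos + 1)
    return offsets

# Example, reusing the hypothetical image from the backup sketch:
# print(find_jpeg_offsets("backup.img"))
```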

        Preview and recover your files

        -

The final step is to preview and recover your files. After scanning is completed, you will see a list of all the found files in different categories (pictures, documents, videos, etc.). You can also filter them by their name, size, date, etc., or search for specific files using keywords.

        To preview a file, click on its name and you will see a thumbnail or a text preview of its content. You can also double-click on the file to open it in its default application.

        -

        To recover a file, check the box next to its name and click on the Recover button at the top. You will be asked to choose a location to save the recovered file. Make sure you choose a different disk or partition than the one you are recovering from to avoid overwriting your data. You can also recover multiple files at once by selecting them all and clicking on the Recover button.

        -

        Tips and Tricks to Avoid Data Loss in the Future

        -

        Disk Drill Pro is a great tool that can help you recover your lost data in various situations. However, it is always better to prevent data loss than to deal with it later. Here are some tips and tricks that can help you avoid data loss in the future:

        -

Back up your data regularly

        -

The best way to avoid data loss is to back up your data regularly. You can use Disk Drill Pro to create disk images of your disks or partitions that can be used for data recovery or disk cloning. You can also use other backup software or cloud services to back up your data online or offline. You should back up your data at least once a week, or more frequently if you have important or sensitive data.

        -

        Use a reliable antivirus program and update it frequently

        -

        Another way to avoid data loss is to use a reliable antivirus program and update it frequently. This can help you protect your data from virus attacks, malware infections, ransomware threats, etc. You should scan your computer regularly for any potential threats and remove them as soon as possible. You should also avoid opening suspicious email attachments or downloading untrusted software or websites.

        -

        Avoid using untrusted software or websites

        -

        A third way to avoid data loss is to avoid using untrusted software or websites that may harm your data or system. Some software or websites may contain viruses, malware, spyware, adware, etc., that may delete, corrupt, encrypt, or steal your data. You should only use software or websites that are safe and reliable and have good reviews and ratings. You should also read the terms and conditions and privacy policies of any software or website before using it.

        -

        Conclusion

        -

In conclusion, Disk Drill Pro is powerful data recovery software that can recover any type of file from any device and file system. It also offers advanced features that can protect your data from loss, back up your data, and monitor your disk health. To use Disk Drill Pro, you need a valid activation code keygen that can unlock all its features and functions. You can download Disk Drill Pro from its official website or a trusted source, install it on your computer, activate it with an activation code keygen, and use it to recover your lost data in three simple steps: select the disk or partition where you lost your data, choose a scan mode and start scanning, and preview and recover your files. You can also follow some tips and tricks to avoid data loss in the future, such as backing up your data regularly, using a reliable antivirus program and updating it frequently, and avoiding untrusted software or websites.

        -

        We hope this article has helped you understand what Disk Drill Pro is, what features it offers, how to download and install it, how to use it to recover your lost data, and some tips and tricks to avoid data loss in the future. If you have any questions or comments,

        please feel free to contact us.

        FAQs

        -
• Q: Is Disk Drill Pro free?
• A: Disk Drill Pro is not free, but it offers a free trial version that allows you to scan for lost files and preview them before recovering them. To recover your files, you need to purchase a license for Disk Drill Pro that comes with an activation code keygen.
• Q: How much does Disk Drill Pro cost?
• A: Disk Drill Pro costs $89 for a lifetime license that can be used on up to three computers. However, you can get Disk Drill Pro with 20% off by using this coupon code: DD-SAVE-20.
• Q: What are the system requirements for Disk Drill Pro?
• A: Disk Drill Pro works on Windows XP/Vista/7/8/10 and Mac OS X 10.8.5 or later. It requires at least 512 MB of RAM, 200 MB of disk space, and an internet connection for activation.
• Q: Does Disk Drill Pro support Android and iOS devices?
• A: Yes, Disk Drill Pro supports Android and iOS devices as well as Windows and Mac computers. You can recover data from your Android or iOS device by connecting it to your computer via USB cable and scanning it with Disk Drill Pro.
• Q: Can Disk Drill Pro recover overwritten files?
• A: It depends on how much the files have been overwritten by new data. If the files have been partially overwritten, Disk Drill Pro may be able to recover some fragments of them. If the files have been completely overwritten, Disk Drill Pro may not be able to recover them at all.

        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Dogz 5 Free Download Full Version Where to Find and Download the Best Version of the Game.md b/spaces/tialenAdioni/chat-gpt-api/logs/Dogz 5 Free Download Full Version Where to Find and Download the Best Version of the Game.md deleted file mode 100644 index c8bdde733a41ae3e52563abb053d1cde3a142022..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Dogz 5 Free Download Full Version Where to Find and Download the Best Version of the Game.md +++ /dev/null @@ -1,156 +0,0 @@ - -

        Dogz 5 Free Download Full Version

        -

        Introduction

        -

        Do you love dogs? Do you wish you could have your own virtual pet that you can play with, care for, and breed? If you answered yes to any of these questions, then you might be interested in Dogz 5, a simulation game that lets you create and raise your own adorable dogz. In this article, we will tell you what Dogz 5 is, why you should download it, and how to do it for free. We will also give you some tips and tricks for playing the game and having fun with your furry friends.

        -

        What is Dogz 5?

        -

        Dogz 5 is the fifth installment of the popular Petz series, developed by PF Magic and published by Ubisoft in 2002. It is a game that simulates the life of a dog owner, allowing you to adopt, name, feed, groom, train, and breed different breeds of dogs. You can also customize your dogz' appearance, personality, and environment by choosing from various accessories, toys, clothes, and scenery. You can also interact with other players online and exchange dogz with them.

        -




        -

        Why should you download Dogz 5?

        -

        Dogz 5 is a game that offers many benefits for dog lovers and gamers alike. Here are some of the reasons why you should download Dogz 5:

        -
• It is fun and relaxing. Playing with your virtual pets can help you relieve stress, improve your mood, and stimulate your creativity.
• It is educational. You can learn about different breeds of dogs, their characteristics, behaviors, and needs. You can also learn about genetics, inheritance, and evolution by breeding your dogz.
• It is social. You can connect with other players who share your passion for dogs and make new friends. You can also share your dogz with them and see how they grow and develop.
• It is free. You don't have to pay anything to download and play Dogz 5. You can enjoy the game without any limitations or restrictions.

        How to download Dogz 5 for free?

        -

        If you are convinced that Dogz 5 is a game worth playing, you might be wondering how to download it for free. Well, don't worry, because we have got you covered. Here are the steps you need to follow to get Dogz 5 on your computer:

        -

        Step 1: Find a reliable source

        -

        The first thing you need to do is find a website that offers Dogz 5 for free download. There are many websites that claim to provide this service, but not all of them are trustworthy or safe. Some of them might contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you need to be careful and do some research before downloading anything from the internet.

        -

        -

        One of the websites that we recommend is Old-Games.com (https://www.old-games.com/download/6148/dogz-5). This website has a large collection of old games that are no longer available or supported by their original developers or publishers. It also has a good reputation among gamers and reviewers for being safe and reliable. You can download Dogz 5 from this website without any risk or hassle.

        -

        Step 2: Check the system requirements

        -

        The next thing you need to do is check if your computer meets the minimum system requirements for running Dogz 5. These are the specifications that your computer needs to have in order to play the game smoothly and without any problems. Here are the system requirements for Dogz 5:

| Component | Minimum requirement |
| --- | --- |
| Operating System | Windows XP/Vista/7/8/10 |
| CPU | Pentium II 266 MHz or higher |
| RAM | 64 MB or more |
| Hard Disk Space | 500 MB or more |
| Video Card | DirectX compatible with 8 MB or more |
| Sound Card | DirectX compatible |
| CD-ROM Drive | 4x speed or faster |
| Internet Connection | Required for online play |
        -

        If your computer meets these requirements, then you are good to go. If not, then you might need to upgrade some of your hardware components or use a different computer.

        -

        Step 3: Download and install the game

        -

        The third thing you need to do is download and install the game on your computer. To do this, follow these steps:

        -
1. Go to Old-Games.com (https://www.old-games.com/download/6148/dogz-5) and click on the "Download" button.
2. Select a download option from the list. You can choose between direct download or torrent download. Direct download is faster but requires a premium account. Torrent download is slower but free.
3. If you choose direct download, enter your email address and password if you have a premium account. If not, click on "Create an account" and follow the instructions.
4. If you choose torrent download, make sure you have a torrent client installed on your computer. If not, download one from the internet (such as uTorrent or BitTorrent) and install it.
5. Wait for the download to finish. The file size is about 300 MB.
6. Once the download is complete, locate the file on your computer and extract it using a file archiver (such as WinRAR or 7-Zip).
7. Open the extracted folder and run the setup.exe file.
8. Follow the installation wizard's instructions and choose a destination folder for the game.
9. Wait for the installation to finish.
10. Congratulations! You have successfully installed Dogz 5 on your computer.

        Step 4: Enjoy playing with your virtual pets

        -

        The last thing you need to do is enjoy playing with your virtual pets. To do this, follow these steps:

        -
1. Go to the destination folder where you installed the game and run the Petz.exe file.
2. Select a language from the menu (English, French, German, Italian, or Spanish).
3. Select "Play" from the main menu.
4. Select "Adopt" from the sub-menu if you want to adopt a new dog or "Load" if you want to load an existing one.
5. Select a breed from the list (there are over 40 breeds available).
6. Select a gender (male or female).
7. Select a name for your dog (you can type it or choose from a list).
8. Select an age for your dog (puppy or adult).
9. Select "OK" to confirm your choices.
10. You will see your new dog appear on the screen in its own room.
11. You can interact with your dog using various tools from the toolbar at the bottom of the screen (such as food bowl, water bowl, brush, ball, etc.). You can also click on your dog to pet it or pick it up.
12. You can switch between different rooms by clicking on the arrows at the top of the screen (such as kitchen, bedroom, backyard, etc.). You can also customize each room by clicking on the paintbrush icon at the bottom right corner of the screen.
13. You can access other options by clicking on the menu icon at the bottom left corner of the screen (such as save, load, options, quit, etc.).

Tips and tricks for playing Dogz 5

The following questions and answers cover the most common issues and double as handy tips.

Is Dogz 5 compatible with Windows 10?

        Dogz 5 is compatible with Windows 10, but you might need to run it in compatibility mode for older versions of Windows. To do this, follow these steps:

        -
1. Right-click on the Petz.exe file and select "Properties".
2. Go to the "Compatibility" tab and check the box that says "Run this program in compatibility mode for".
3. Select a Windows version from the drop-down menu (such as Windows XP or Windows 7).
4. Click on "Apply" and then on "OK".
5. Run the game as usual.

        How many dogz can I have in Dogz 5?

        -

        You can have up to five dogz in Dogz 5 at a time. However, you can adopt more dogz and store them in the adoption center. You can also exchange dogz with other players online or delete dogz that you don't want anymore.

        -

        How can I save my game progress in Dogz 5?

        -

        You can save your game progress in Dogz 5 by clicking on the menu icon at the bottom left corner of the screen and selecting "Save". You can also use the keyboard shortcut Ctrl+S. You can load your saved game by clicking on the menu icon and selecting "Load". You can also use the keyboard shortcut Ctrl+L.

        -

        How can I uninstall Dogz 5 from my computer?

        -

        You can uninstall Dogz 5 from your computer by following these steps:

        -
1. Go to the Control Panel and select "Programs and Features".
2. Find Dogz 5 from the list of programs and click on it.
3. Click on "Uninstall" and follow the instructions.
4. Delete any remaining files or folders related to Dogz 5 from your computer.

        Where can I find more information about Dogz 5?

        -

        You can find more information about Dogz 5 by visiting these websites:

        -
• The official website of Petz (https://petz.com/)
• The official forum of Petz (https://forums.ubisoft.com/forumdisplay.php/25-Petz)
• The fan website of Petz (http://www.sherlocksoftware.org/page.php?id=1)
• The fan wiki of Petz (https://petz.fandom.com/wiki/Petz_Wiki)

        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Easy and Delicious Crack Chicken Recipes without Bacon.md b/spaces/tialenAdioni/chat-gpt-api/logs/Easy and Delicious Crack Chicken Recipes without Bacon.md deleted file mode 100644 index 4ae2f8e2ed58dab1fcd22cce99c507747a0d1567..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Easy and Delicious Crack Chicken Recipes without Bacon.md +++ /dev/null @@ -1,31 +0,0 @@ - -

        How to Make Crack Chicken without Bacon

        -

        Crack chicken is a popular dish that consists of chicken, cream cheese, ranch dressing, and bacon. It is usually cooked in a slow cooker or an instant pot and served over rice, pasta, or bread. It is creamy, cheesy, and delicious.

        -




        -

        However, if you are looking for a healthier or vegetarian version of crack chicken, you may want to omit the bacon. But how can you make crack chicken without bacon and still keep it flavorful and satisfying? Here are some tips and tricks to help you out.

        -

        Use a Substitute for Bacon

        -

        One way to make crack chicken without bacon is to use a substitute that can mimic the texture and taste of bacon. Some examples are:

        -
• Turkey bacon: This is a leaner and lower-calorie alternative to pork bacon. It has a similar smoky flavor and crispiness. You can cook it in a skillet or in the oven and then chop it up and add it to your crack chicken.
• Coconut bacon: This is a vegan option that uses coconut flakes that are seasoned with liquid smoke, soy sauce, maple syrup, and other spices. You can bake them in the oven until they are crispy and then crumble them over your crack chicken.
• Mushroom bacon: This is another vegan option that uses thinly sliced mushrooms that are marinated in soy sauce, liquid smoke, maple syrup, and other seasonings. You can cook them in a skillet until they are browned and crispy and then sprinkle them over your crack chicken.

        Add More Seasonings and Spices

        -

        Another way to make crack chicken without bacon is to add more seasonings and spices to your chicken mixture. This can help enhance the flavor and make it more interesting. Some examples are:

        -
• Garlic: This is a classic ingredient that can add a lot of flavor and aroma to any dish. You can use fresh garlic cloves or garlic powder. You can also use roasted garlic for a sweeter and milder taste.
• Onion: This is another staple ingredient that can add some sweetness and depth to your crack chicken. You can use fresh onion or onion powder. You can also use green onions or chives for some color and freshness.
• Paprika: This is a spice that can add some color and smokiness to your crack chicken. You can use regular paprika or smoked paprika. You can also use cayenne pepper or chili powder for some heat.

        Add Some Veggies

        -

        A third way to make crack chicken without bacon is to add some veggies to your chicken mixture. This can help add some nutrition and texture to your dish. Some examples are:

        -

        -
          -
        • Broccoli: This is a green vegetable that can add some crunch and fiber to your crack chicken. You can use fresh or frozen broccoli florets. You can also use cauliflower for a similar effect.
        • -
        • Spinach: This is a leafy green vegetable that can add some iron and vitamins to your crack chicken. You can use fresh or frozen spinach leaves. You can also use kale or Swiss chard for a similar effect.
        • -
        • Corn: This is a yellow vegetable that can add some sweetness and starch to your crack chicken. You can use fresh or canned corn kernels. You can also use peas or carrots for a similar effect.
        • -
        -

        Conclusion

        -

        In conclusion, you can make crack chicken without bacon by using a substitute for bacon, adding more seasonings and spices, or adding some veggies. These tips and tricks can help you create a healthier or vegetarian version of crack chicken that is still creamy, cheesy, and delicious.

        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Ares Dark The Ultimate Black Icon Pack for Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Ares Dark The Ultimate Black Icon Pack for Android.md deleted file mode 100644 index 1803ccc5814233a97aa2062dfe9c98b7bf4e1301..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Ares Dark The Ultimate Black Icon Pack for Android.md +++ /dev/null @@ -1,108 +0,0 @@ - -

        Ares Dark Icon Pack APK: A Review

        -

        If you are looking for a new way to customize your Android device, you might want to check out Ares Dark Icon Pack APK. This is a personalization app that offers dark squircle-shaped icons for your homescreen and app drawer. In this article, we will review the features, pros and cons, and how to download and install Ares Dark Icon Pack APK on your device.

        -




        -

        What is Ares Dark Icon Pack APK?

        -

        Ares Dark Icon Pack APK is a personalization app developed by One4Studio, a team of designers and developers who create high-quality icon packs for Android devices. Ares Dark Icon Pack APK is one of their products, which features over 6000 elegant and consistent icons that are created by a community from all around the world. The icons are designed to be dark, minimal, and squircle-shaped, which means they have rounded corners and edges. The icons also have a subtle shadow effect that adds some depth and contrast to them.

        -

        Features of Ares Dark Icon Pack APK

        -

        Some of the features of Ares Dark Icon Pack APK are:

        -
• Over 6000 icons in dark squircle shape
• Supports over 30 launchers, including Nova, Apex, Action, ADW, Go, Lawnchair, Smart, and more
• Includes wallpapers, widgets, folders, and dynamic calendar icons
• Has an icon request tool that allows you to request missing icons or suggest new ones
• Has an icon mask that applies the same shape and style to unthemed icons
• Has frequent updates that add new icons and fix bugs

        How to download and install Ares Dark Icon Pack APK

        -

        To download and install Ares Dark Icon Pack APK on your device, you need to follow these steps:

        -
1. Go to the download page of Ares Dark Icon Pack APK on AppBrain and click on the "Download" button.
2. Allow the download to complete and then open the downloaded file.
3. If prompted, enable the installation from unknown sources in your device settings.
4. Follow the instructions on the screen to install the app.
5. Open the app and grant the necessary permissions.
6. Select your preferred launcher from the list and apply the icon pack.
7. Enjoy your new dark squircle icons!

        Pros and cons of Ares Dark Icon Pack APK

        -

        Like any other app, Ares Dark Icon Pack APK has its pros and cons. Here are some of them:

        -


        -

        Pros

        -

        Elegant and consistent icons

        -

        The icons in Ares Dark Icon Pack APK are designed to be elegant and consistent. They have a dark color scheme that matches well with most wallpapers and themes. They also have a squircle shape that gives them a modern and sleek look. The icons are also detailed and clear, making them easy to recognize and use.

        -

        Wide range of supported launchers

        -

        Ares Dark Icon Pack APK supports over 30 launchers, which means you can use it with almost any launcher you prefer. Some of the supported launchers are Nova, Apex, Action, ADW, Go, Lawnchair, Smart, and more. You can also use it with other launchers that support icon packs by using a third-party app like Awesome Icons or Unicon.

        -

        Frequent updates and requests

        -

        Ares Dark Icon Pack APK has frequent updates that add new icons and fix bugs. It also has an icon request tool that allows you to request missing icons or suggest new ones. The developers are responsive and friendly, and they try to fulfill the requests as soon as possible. You can also join their Telegram group to get the latest news and updates about their icon packs.

        -

        Cons

        -

        Not compatible with some devices

        -

        Ares Dark Icon Pack APK may not be compatible with some devices, especially older ones or those with low specifications. Some users have reported issues with the app crashing, not applying, or not showing the icons correctly. If you encounter any problems, you can try to clear the app cache, restart your device, or contact the developers for support.

        -

        Requires a third-party app to apply

        -

        Ares Dark Icon Pack APK requires a third-party app to apply the icon pack on your device. This means you need to download and install another app, such as a launcher or an icon changer, to use the icon pack. This may not be convenient for some users who prefer to use the default launcher or who do not want to install extra apps.

        -

        May not match some wallpapers or themes

        -

        Ares Dark Icon Pack APK has a dark color scheme that may not match some wallpapers or themes. If you have a bright or colorful wallpaper or theme, the icons may look out of place or hard to see. You may need to change your wallpaper or theme to match the icons, or use a different icon pack that suits your preference.

        -

        Conclusion

        -

        Ares Dark Icon Pack APK is a personalization app that offers dark squircle-shaped icons for your Android device. It has over 6000 icons that are elegant and consistent, and it supports over 30 launchers. It also has wallpapers, widgets, folders, and dynamic calendar icons, as well as an icon request tool and an icon mask. However, it also has some drawbacks, such as not being compatible with some devices, requiring a third-party app to apply, and not matching some wallpapers or themes. Overall, Ares Dark Icon Pack APK is a great option for users who like dark and minimal icons, and who want to customize their device with a new look.

        -

        FAQs

        -
• Q: How much does Ares Dark Icon Pack APK cost?
• A: Ares Dark Icon Pack APK is free to download and use. However, it contains ads and in-app purchases that allow you to remove the ads and support the developers.
• Q: How can I update Ares Dark Icon Pack APK?
• A: You can update Ares Dark Icon Pack APK by visiting the download page on AppBrain and downloading the latest version. Alternatively, you can enable automatic updates in your device settings.
• Q: How can I uninstall Ares Dark Icon Pack APK?
• A: You can uninstall Ares Dark Icon Pack APK by going to your device settings, selecting "Apps", finding Ares Dark Icon Pack APK in the list, and tapping on "Uninstall". You may also need to remove the icon pack from your launcher settings.
• Q: How can I contact the developers of Ares Dark Icon Pack APK?
• A: You can contact the developers of Ares Dark Icon Pack APK by sending them an email at one4studio@gmail.com or joining their Telegram group at https://t.me/one4studiochat.
• Q: How can I rate and review Ares Dark Icon Pack APK?
• A: You can rate and review Ares Dark Icon Pack APK by visiting the download page on AppBrain and clicking on the "Rate" button. You can also leave a comment with your feedback and suggestions.

        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Atomic Heart 3D Model Review Why You Should Download It Now.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Atomic Heart 3D Model Review Why You Should Download It Now.md deleted file mode 100644 index 0baf369adbd990e95a06bc6d95c044c073d029cf..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Atomic Heart 3D Model Review Why You Should Download It Now.md +++ /dev/null @@ -1,87 +0,0 @@ - -

        How to Download and Create Amazing 3D Models from Atomic Heart

        -

        Atomic Heart is an upcoming first-person shooter game set in an alternate history Soviet Union, where you fight against mutated creatures, rogue robots, and mysterious experiments. The game features stunning graphics, immersive gameplay, and a rich story that will keep you hooked for hours.

        -

        But what if you want to explore the world of Atomic Heart beyond the game? What if you want to create your own characters and scenes inspired by the game's retrofuturistic aesthetic? Or what if you want to download some of the amazing 3D models from the game and use them for your own projects?

        -




        -

        In this article, we will show you how to do all that and more. We will introduce you to some of the best free 3D modeling software tools that you can use to download and create your own 3D models from Atomic Heart. We will also give you some tips and tricks on how to edit and animate your 3D models using Blender, a powerful and open source 3D creation software. By the end of this article, you will be able to unleash your creativity and make your own Atomic Heart-inspired 3D creations.

        -

        How to Download Free 3D Models from Atomic Heart

        -

        One of the easiest ways to get your hands on some of the awesome 3D models from Atomic Heart is to use Sketchfab, a web-based platform that lets you view, buy, and download millions of 3D models from various categories. Sketchfab has a dedicated tag for Atomic Heart, where you can find dozens of high-quality 3D models from the game, such as characters, weapons, vehicles, environments, and more.

        -

To download free 3D models from Atomic Heart using Sketchfab, follow these simple steps (a scripted alternative is sketched after the list):

        -
1. Go to Sketchfab and search for "Atomic Heart" in the search bar.
2. Browse through the results and find a 3D model that you like. You can use the filters on the left side to narrow down your search by category, license, price, etc.
3. Click on the 3D model that you want to download. You will be taken to a page where you can view the model in 3D, rotate it, zoom in and out, change the lighting, etc.
4. If the model is free to download, you will see a "Download" button on the right side of the page. Click on it and choose a file format that suits your needs. You can choose from various formats such as OBJ, FBX, STL, GLTF, etc.
5. Save the file to your computer and enjoy your free 3D model from Atomic Heart.
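If you prefer a scripted workflow, Sketchfab also exposes a public Data API. The sketch below queries it for downloadable Atomic Heart models; the endpoint and parameter names follow Sketchfab's documented v3 API, but treat them as assumptions and fall back to the site UI above if they change. Actually downloading a model through the API additionally requires an API token.

```python
import requests

# Assumed v3 search endpoint; restricted to models marked downloadable.
resp = requests.get(
    "https://api.sketchfab.com/v3/search",
    params={"type": "models", "q": "atomic heart", "downloadable": "true"},
    timeout=30,
)
resp.raise_for_status()
for model in resp.json().get("results", []):
    print(model.get("name"), "-", model.get("viewerUrl"))
```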

        Here is an example of a free 3D model from Atomic Heart that you can download from Sketchfab:

The Twins | Atomic Heart - Download Free 3D model by LLIypuk

        -

        How to Use Daz 3D to Create Your Own Characters and Scenes Inspired by Atomic Heart

        -

If you want to create your own characters and scenes inspired by Atomic Heart, one of the best free 3D modeling software tools that you can use is Daz 3D. Daz 3D is a powerful and easy-to-use tool that lets you create realistic human figures and poses using a huge library of pre-built content. You can customize every aspect of your characters, such as their appearance, clothing, accessories, hair, expressions, etc.

To create your own characters and scenes inspired by Atomic Heart using Daz 3D, follow these simple steps:

        -
1. Go to the Daz 3D website and download the free Daz Studio software. Install it on your computer and launch it.
2. In Daz Studio, you will see a default scene with a human figure. You can use the tabs on the left side to browse through the content library and find different items that you can add to your scene, such as clothing, hair, props, environments, etc.
3. To customize your character, you can use the sliders on the right side to adjust various parameters, such as morphs, poses, expressions, shaders, etc. You can also use the tools on the top bar to move, rotate, scale, and manipulate your character and other items in your scene.
4. To create a scene inspired by Atomic Heart, you can look for items that match the game's style and theme, such as Soviet uniforms, weapons, robots, etc. You can also use the search function to find specific items that you want.
5. Once you are happy with your character and scene, you can render it using the render settings on the right side. You can choose from different render engines, presets, quality settings, resolution, etc. You can also save your scene as a file for future use or export it as an image or a video.

        Here is an example of a character and scene inspired by Atomic Heart that you can create using Daz 3D:

        -


Atomic Heart Inspired Character and Scene - Created with Daz 3D

        -

        How to Use Blender to Edit and Animate Your 3D Models from Atomic Heart

        -

        If you want to edit and animate your 3D models from Atomic Heart, one of the best free and open source 3D creation software tools that you can use is Blender. Blender is a powerful and versatile tool that lets you do everything from modeling, sculpting, texturing, rigging, animating, rendering, compositing, video editing, and more.

        -

To edit and animate your 3D models from Atomic Heart using Blender, follow these simple steps (a scripted version of the same flow is sketched after the list):

        -
1. Go to the Blender website and download the free Blender software. Install it on your computer and launch it.
2. In Blender, you will see a default scene with a cube. You can delete the cube by pressing X and confirming with Enter. You can also use the menu on the top left to create a new scene or open an existing one.
3. To import your 3D model from Atomic Heart that you downloaded from Sketchfab or created with Daz 3D, you can use the menu on the top left and choose File > Import > (file format). Navigate to the folder where you saved your file and select it. Your 3D model will appear in your scene.
4. To edit your 3D model, you can use the tabs on the left side to switch between different modes, such as Object Mode, Edit Mode, Sculpt Mode, etc. You can also use the tools on the left side to perform various actions on your model, such as move, rotate, scale, extrude, inset, etc.
5. To animate your 3D model, you can use the timeline on the bottom of the screen to create keyframes for your model's position, rotation, scale, etc. You can also use the dope sheet or the graph editor to edit your keyframes and curves. You can also use the menu on the top right to add modifiers or constraints to your model to create more complex animations.
6. Once you are happy with your 3D model and animation, you can render it using the render settings on the right side. You can choose from different render engines (such as Eevee or Cycles), output formats (such as PNG or MP4), resolution, samples, etc. You can also save your project as a file for future use or export it as an image or a video.
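The same import-animate-render flow can also be scripted with Blender's built-in Python API (bpy) from the Scripting workspace. The sketch below assumes Blender 3.x operator names and a hypothetical file path; it spins the imported model once around the Z axis and renders a single frame.

```python
import bpy

# Import an FBX model (hypothetical path; adjust to your download).
bpy.ops.import_scene.fbx(filepath="/path/to/atomic_heart_model.fbx")
obj = bpy.context.selected_objects[0]

# Two keyframes: a simple turntable-style spin on the Z rotation.
obj.rotation_euler = (0.0, 0.0, 0.0)
obj.keyframe_insert(data_path="rotation_euler", frame=1)
obj.rotation_euler = (0.0, 0.0, 6.283)  # one full turn in radians
obj.keyframe_insert(data_path="rotation_euler", frame=120)

# Render a single still frame to disk.
bpy.context.scene.render.filepath = "/tmp/atomic_heart_render.png"
bpy.ops.render.render(write_still=True)
```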

        Here is an example of a 3D model from Atomic Heart that you can edit and animate using Blender:

Atomic Heart Robot - Edited and Animated with Blender

        -

        Conclusion

        -

        In this article, we have shown you how to download and create amazing 3D models from Atomic Heart using some of the best free 3D modeling software tools available online. We have also given you some tips and tricks on how to edit and animate your 3D models using Blender, a powerful and open source 3D creation software. By following these steps, you will be able to unleash your creativity and make your own Atomic Heart-inspired 3D creations.

        -

        Whether you are a fan of the game, a 3D modeling enthusiast, or just curious about the world of Atomic Heart, we hope you have enjoyed this article and learned something new. We encourage you to try out the 3D modeling tools that we have introduced and share your creations online with other fans and 3D modelers. You can also check out the official website of Atomic Heart for more information about the game and its development.

        -

        Thank you for reading and happy 3D modeling!

        -

        FAQs

        -

        What is Atomic Heart?

        -

        Atomic Heart is an upcoming first-person shooter game set in an alternate history Soviet Union, where you fight against mutated creatures, rogue robots, and mysterious experiments. The game features stunning graphics, immersive gameplay, and a rich story that will keep you hooked for hours.

        -

        When will Atomic Heart be released?

        -

        Atomic Heart does not have a confirmed release date yet, but it is expected to be released sometime in 2022. The game will be available for PC, PlayStation 4, PlayStation 5, Xbox One, and Xbox Series X/S.

        -

        How can I download free 3D models from Atomic Heart?

        -

        One of the easiest ways to download free 3D models from Atomic Heart is to use Sketchfab, a web-based platform that lets you view, buy, and download millions of 3D models from various categories. Sketchfab has a dedicated tag for Atomic Heart, where you can find dozens of high-quality 3D models from the game, such as characters, weapons, vehicles, environments, and more.

        -

        What are some of the best free 3D modeling software tools that I can use to create my own characters and scenes inspired by Atomic Heart?

        -

        Some of the best free 3D modeling software tools that you can use to create your own characters and scenes inspired by Atomic Heart are Daz 3D and Blender. Daz 3D is a powerful and easy-to-use tool that lets you create realistic human figures and poses using a huge library of pre-built content. Blender is a powerful and versatile tool that lets you do everything from modeling, sculpting, texturing, rigging, animating, rendering, compositing, video editing, and more.

        -

        How can I edit and animate my 3D models from Atomic Heart using Blender?

        -

        To edit and animate your 3D models from Atomic Heart using Blender, you need to import your 3D model into Blender using the File > Import > (file format) menu. Then you can use the tabs on the left side to switch between different modes, such as Object Mode, Edit Mode, Sculpt Mode, etc. You can also use the tools on the left side to perform various actions on your model, such as move, rotate, scale, extrude, inset, etc. To animate your 3D model, you need to use the timeline on the bottom of the screen to create keyframes for your model's position, rotation, scale, etc. You can also use the dope sheet or the graph editor to edit your keyframes and curves. You can also use the menu on the top right to add modifiers or constraints to your model to create more complex animations.

        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Bubble Shooter 2 MOD APK and Play with No Ads.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Bubble Shooter 2 MOD APK and Play with No Ads.md deleted file mode 100644 index d20a8e0746e16dfe2d5d5d6f1c053597dfd81409..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Bubble Shooter 2 MOD APK and Play with No Ads.md +++ /dev/null @@ -1,100 +0,0 @@ -
        -

        Download Bubble Shooter 2 Mod Apk: A Fun and Addictive Game for Everyone

        -

        If you are looking for a fun and relaxing game to play on your Android device, you should try Bubble Shooter 2. This is a classic arcade game that will keep you entertained for hours. You can download Bubble Shooter 2 mod apk from this article and enjoy unlimited features and benefits.

        -




        -

        What is Bubble Shooter 2?

        -

        Bubble Shooter 2 is a simple but addictive game where you have to shoot and pop colorful bubbles on the screen. The game has hundreds of levels with different challenges and objectives. You can also play with your friends and compete for the highest score.

        -

        Features of Bubble Shooter 2

        -

        Bubble Shooter 2 has many features that make it a fun and enjoyable game. Here are some of them:

        -
• Beautiful graphics and animations
• Smooth gameplay and controls
• Various modes and themes
• Daily rewards and bonuses
• Leaderboards and achievements
• Offline mode

        How to play Bubble Shooter 2

        -

        The game is very easy to play. You just have to tap on the screen where you want to shoot your bubble. You have to match at least three bubbles of the same color to pop them and clear the board. You can also use special bubbles that have different effects, such as bombs, fireballs, rainbow bubbles, etc.
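Under the hood, the "match at least three" rule is just a connected-component search. The Python sketch below shows the idea with a flood fill on a plain square grid; the real game uses a staggered bubble layout, so the neighbor offsets here are a simplification.

```python
from collections import deque

def connected_same_color(grid, start):
    """Return all cells connected to `start` that share its color."""
    color = grid[start[0]][start[1]]
    seen, queue = {start}, deque([start])
    while queue:
        r, c = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if (0 <= nr < len(grid) and 0 <= nc < len(grid[0])
                    and (nr, nc) not in seen and grid[nr][nc] == color):
                seen.add((nr, nc))
                queue.append((nr, nc))
    return seen

grid = [["R", "R", "B"],
        ["R", "B", "B"],
        ["G", "G", "B"]]
cluster = connected_same_color(grid, (0, 0))
if len(cluster) >= 3:  # pop when three or more bubbles match
    print("pop", sorted(cluster))
```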

        -

        Why download Bubble Shooter 2 mod apk?

        -

        If you want to have more fun and excitement in playing Bubble Shooter 2, you should download the mod apk version of the game. The mod apk will give you access to unlimited features and benefits, such as:

        -
• Unlimited coins and gems
• All levels and modes unlocked
• No ads
• No root required

        How to download and install Bubble Shooter 2 mod apk

        -

        To download and install Bubble Shooter 2 mod apk, you have to follow these simple steps:

        -

        -

        Step 1: Download the mod apk file from a trusted source

        -

        You can download the mod apk file from this link. This is a safe and reliable source that will provide you with the latest version of the mod apk.

        -

        Step 2: Enable unknown sources on your device

        -

        Before you can install the mod apk file, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

        -

        Step 3: Install the mod apk file and enjoy the game

        -

        Now, you can install the mod apk file by tapping on it and following the instructions. Once the installation is complete, you can open the game and enjoy all the features and benefits of the mod apk.

        -

        Tips and tricks for playing Bubble Shooter 2

        -

        To make the most out of your gaming experience, here are some tips and tricks for playing Bubble Shooter 2:

        -

        Aim carefully and use the walls to bounce your bubbles

        -

The key to success in Bubble Shooter 2 is to aim carefully and use the walls to bounce your bubbles. This helps you reach bubbles that are hard to get and create more combos. You can also see the next bubble you will shoot at the bottom of the screen, so you can plan your moves accordingly. A simple way to model a wall bounce is shown below.
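To make the bounce idea concrete, here is a small Python sketch of a straight-line shot whose horizontal direction flips at the side walls. The field width, step size, and step count are illustrative assumptions rather than values from the game.

```python
def trajectory(x, y, dx, dy, width, steps=200):
    """Trace a bubble's path, reflecting off the left and right walls."""
    points = []
    for _ in range(steps):
        x, y = x + dx, y + dy
        if x <= 0.0 or x >= width:
            dx = -dx                      # reflect the horizontal velocity
            x = min(max(x, 0.0), width)   # clamp back inside the field
        points.append((x, y))
    return points

# Example: a shot angled up and to the right bounces back off the right wall
path = trajectory(x=4.0, y=0.0, dx=0.5, dy=1.0, width=5.0)
```

Aiming then amounts to choosing dx and dy so that the reflected path lands on the bubbles you want to hit.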

        -

        Use power-ups and boosters to clear difficult levels

        -

Sometimes you may encounter levels that are too difficult to clear with normal bubbles. In these cases, you can use power-ups and boosters. Power-ups are special bubbles with different effects, such as bombs, fireballs, and rainbow bubbles; you can get them by popping certain bubbles or by buying them with coins. Boosters are items you can use before or during a game, such as extra moves, color balls, and an aim line; you can get them by completing missions or by buying them with gems.

        -

        Collect coins and gems to unlock new features and items

        -

        As you play the game, you will earn coins and gems that you can use to unlock new features and items. Coins are used to buy power-ups and themes, while gems are used to buy boosters and lives. You can collect coins and gems by popping bubbles, completing levels, watching ads, spinning the wheel, or buying them with real money.

        -

        Conclusion

        -

Bubble Shooter 2 is a fun and addictive game for your Android device. You can download the Bubble Shooter 2 mod apk from this article and enjoy unlocked extras such as unlimited coins and gems, all levels and modes, and an ad-free experience. The game offers attractive graphics, smooth gameplay, various modes and themes, daily rewards and bonuses, leaderboards and achievements, and an offline mode, and you can compete with friends for the highest score. It is easy to play but hard to master: shoot and pop colorful bubbles to clear the board, and rely on special bubbles, power-ups, and boosters for the difficult levels, collecting coins and gems along the way to unlock new features and items. Suitable for any age or skill level, it is a solid choice if you want a relaxing, entertaining game.

        -

        FAQs

        -

        Here are some frequently asked questions about Bubble Shooter 2:

        -
          -
1. Q: Is Bubble Shooter 2 free to play?
   A: Yes, Bubble Shooter 2 is free to play. However, it contains in-app purchases that allow you to buy coins, gems, lives, power-ups, boosters, themes, etc.
2. Q: How can I play Bubble Shooter 2 with my friends?
   A: You can play Bubble Shooter 2 with your friends by connecting your Facebook account to the game. This will allow you to see your friends' scores and challenge them to beat your score.
3. Q: How can I save my progress in Bubble Shooter 2?
   A: You can save your progress in Bubble Shooter 2 by connecting your Google Play Games account to the game. This will allow you to sync your progress across different devices.
4. Q: How can I contact the developers of Bubble Shooter 2?
   A: You can contact the developers of Bubble Shooter 2 by sending an email to support@bubbleshooter.com or by visiting their website at www.bubbleshooter.com.
5. Q: How can I update Bubble Shooter 2?
   A: You can update Bubble Shooter 2 by visiting the Google Play Store and downloading the latest version of the game.

        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/Fisiologia Humana Silverthorn Descargar Gratis.md b/spaces/tioseFevbu/cartoon-converter/Fisiologia Humana Silverthorn Descargar Gratis.md deleted file mode 100644 index 152d29a24c7580fd3d54dcf51ee2432665838414..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/Fisiologia Humana Silverthorn Descargar Gratis.md +++ /dev/null @@ -1,58 +0,0 @@ -## fisiologia humana silverthorn descargar gratis - - - - - - ![Fisiologia Humana Silverthorn Descargar Gratis](https://img94.xooimage.com/files/0/6/f/fisiologa-humana-...verthorn-3c42d97.jpg) - - - - - -**Download File >>>>> [https://tinourl.com/2txBqC](https://tinourl.com/2txBqC)** - - - - - - - - - - - - Here is a possible title and article with html formatting for the keyword "fisiologia humana silverthorn descargar gratis": - -# ¿Dónde descargar gratis el libro de Fisiología Humana de Silverthorn? - - - -El libro de Fisiología Humana de Silverthorn es una obra de referencia para el estudio de la fisiología humana, que aborda los conceptos básicos y los avances más recientes de esta disciplina. El libro tiene un enfoque integrado que relaciona la fisiología con la anatomía, la bioquímica, la genética y la biología celular. Además, incluye numerosos recursos didácticos, como ilustraciones, tablas, esquemas, casos clínicos, preguntas de autoevaluación y un sitio web complementario. - - - -Si quieres descargar gratis el libro de Fisiología Humana de Silverthorn en formato PDF, puedes hacerlo desde algunos sitios web que lo ofrecen como Scribd[^1^], Archive[^3^] o Libros Medicos PDF. Sin embargo, debes tener en cuenta que estos sitios no cuentan con la autorización de los autores o de la editorial para distribuir el libro, por lo que podrían estar infringiendo los derechos de autor. Por eso, te recomendamos que adquieras el libro de forma legal, comprándolo en una librería o en una plataforma digital como Amazon o Google Play. Así podrás disfrutar del libro con la mejor calidad y apoyar el trabajo de los autores y de la editorial. - - - -El libro de Fisiología Humana de Silverthorn es una excelente opción para aprender sobre el funcionamiento del cuerpo humano y sus mecanismos de regulación. Esperamos que esta información te haya sido útil y que puedas acceder al libro de forma segura y responsable. - -Here are a few more paragraphs with html formatting for the article: - -El libro de Fisiología Humana de Silverthorn está dividido en siete unidades que abarcan los principales temas de la fisiología humana: introducción a la fisiología, fisiología celular, neurofisiología, fisiología cardiovascular, fisiología respiratoria, fisiología renal y de los líquidos corporales, y fisiología endocrina y reproductiva. Cada unidad se compone de varios capítulos que desarrollan los conceptos teóricos y los aplican a situaciones clínicas o experimentales. Al final de cada capítulo hay un resumen de los puntos clave, un glosario de términos y una serie de preguntas para repasar y evaluar lo aprendido. - - - -El libro también cuenta con un sitio web complementario que ofrece recursos adicionales para el estudio y la práctica de la fisiología humana. Entre estos recursos se encuentran: animaciones, videos, simulaciones, ejercicios interactivos, casos clínicos, lecturas complementarias y autoevaluaciones. El acceso al sitio web se realiza mediante un código que se encuentra en la primera página del libro impreso o en la versión digital. 
Silverthorn's Human Physiology is a work recognized internationally for its scientific rigor, its clarity of exposition, and its integrated, innovative approach. It is an indispensable tool for students and professionals in the health sciences who want to deepen their knowledge of how the human body functions and of its mechanisms of adaptation and regulation.
 -
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Halimbawa Ng Maikling Kwento Para Sa Grade 3 Zip.md b/spaces/tioseFevbu/cartoon-converter/scripts/Halimbawa Ng Maikling Kwento Para Sa Grade 3 Zip.md deleted file mode 100644 index c94fe3f292c7412aa66c55a9a059009b6e2661f1..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Halimbawa Ng Maikling Kwento Para Sa Grade 3 Zip.md +++ /dev/null @@ -1,42 +0,0 @@ -
        -I can try to write a title and an article for you, but I cannot guarantee that they will be SEO optimized or HTML formatted. Here is what I came up with: - -

Halimbawa Ng Maikling Kwento Para Sa Grade 3 Zip: Meaningful and Enjoyable Short Stories for Children

        - -

Short stories are one of the best ways to offer children both lessons and entertainment. These stories usually contain characters, a setting, a conflict, and a resolution that can touch the lives of their readers. Short stories can also show different aspects of Filipino culture, traditions, and beliefs.

        -

        Halimbawa Ng Maikling Kwento Para Sa Grade 3 Zip


        DOWNLOAD ••• https://urlcod.com/2uHwUA



        - -

In this article, we share some examples of short stories for grade 3 (zip) that you can read and use in your teaching or studies. These stories carry a variety of themes and messages that can help develop children's morals, minds, and emotions.

        - -

Ang Alamat ng Pinya (The Legend of the Pineapple)

        - -

One of the best-known examples of a short story for grade 3 is the legend of the pineapple. It is about a young girl named Pina who was extremely lazy and ill-tempered. She would not help her mother with the household chores and was always complaining. Because of this, her mother punished her, and she was turned into a fruit with many eyes.

        - -

The lesson this story offers is the value of being hardworking, helpful, and kind. It also shows respect for one's parents and appreciation for the gifts of nature.

        - -

Ang Pagong at ang Matsing (The Turtle and the Monkey)

        - -

Another example of a short story for grade 3 is the turtle and the monkey. It is about a friendship destroyed by greed and deceit. The monkey took advantage of the turtle's kindness and weakness and seized his share of the banana plant they had split between them. Still unsatisfied, the monkey then tried to take the turtle's home under the sea. In the end he failed, and he lost his friend for good.

        - -

The lesson this story offers is the value of being honest, fair, and generous. It also warns against greed, deception, and bullying.

        - -

Ang Ibong Adarna (The Adarna Bird)

        - -

Also among the famous examples of a short story for grade 3 is the Ibong Adarna. It is about a magical bird with the power to cure illness and change its appearance. The three princes of Berbanya set out to find the bird to heal their father, the king, who was gravely ill. On their journey they faced all kinds of trials, dangers, and temptations. In the end it was Don Juan who caught the bird and returned to the kingdom.

        - -

The lesson that can be drawn from the... Okay, I will try to write a few more paragraphs for you. Here they are: - -

Ang Mga Kuwento ni Lola Basyang (The Stories of Lola Basyang)

        - -

One of the most celebrated examples of short stories for grade 3 is the collection of Lola Basyang tales. These stories were written by Severino Reyes, who was also known as Lola Basyang. They often feature elements of wonder, humor, and romance. Some of the best-known Lola Basyang stories are "Ang Prinsipeng Mahaba ang Ilong", "Ang Prinsesa ng mga Ibon", "Ang Mahiwagang Biyulin", and "Ang Plautin ni Periking".

        -

        - -

        Ang aral na maibibigay ng mga kuwentong ito ay ang kahalagahan ng pagmamahal, pagtitiwala, pagpapakumbaba, at pagkakaroon ng malawak na imahinasyon. Ito ay nagpapakita rin ng pagpapahalaga sa mga kaugalian at pamana ng mga Pilipino.

        - -

        Ang Alamat ng Sampaguita

        - -

Another example of a short story for grade 3 is the legend of the sampaguita. It is about a beautiful maiden named Dita who was loved by a young man named Hamir. Their parents would not allow them to marry because they belonged to different religions, so the two eloped and hid in a forest. They could not escape their parents' anger, however, and were killed. On their grave grew a white, sweet-smelling flower that came to be called the sampaguita.

        - -

The lesson this story offers is the value of being steadfast, faithful, and patient in love. It also shows respect for different faiths and unity in spite of differences.

        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Hindi Dhan Dhana Dhan Goal Video Songs 1080p Free Downloadl _TOP_.md b/spaces/tioseFevbu/cartoon-converter/scripts/Hindi Dhan Dhana Dhan Goal Video Songs 1080p Free Downloadl _TOP_.md deleted file mode 100644 index e018e77ad92baa3935309abb9cba68c8c0768439..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Hindi Dhan Dhana Dhan Goal Video Songs 1080p Free Downloadl _TOP_.md +++ /dev/null @@ -1,23 +0,0 @@ - -``` -

        Hindi Dhan Dhana Dhan Goal Video Songs 1080p Free Downloadl: A Review

        -

        Dhan Dhana Dhan Goal is a 2007 Bollywood sports drama film directed by Vivek Agnihotri and starring John Abraham, Bipasha Basu, Arshad Warsi and Boman Irani. The film revolves around a struggling football club in Southall, London, that faces the threat of closure and decides to participate in a league tournament to save their stadium.

        -

        The film features six songs composed by Pritam and sung by various artists such as Daler Mehndi, Javed Ali, Neeraj Shridhar and Richa Sharma. The songs are energetic and catchy, and suit the theme of the film well. The most popular song of the film is "Halla Bol", which is a motivational anthem that plays during the climax of the film. The song is sung by Daler Mehndi and has a powerful chorus that inspires the team to fight for their goal.

        -

        Hindi Dhan Dhana Dhan Goal Video Songs 1080p Free Downloadl


        DOWNLOADhttps://urlcod.com/2uHwO2



        -

The video songs of the film are also well-made and showcase the talent and charisma of the actors. The songs are shot in locations such as London, Mumbai, and Goa, and have a vibrant, colorful feel. The video quality is high as well: the songs are available in 1080p resolution for free download on various websites and platforms.

        -

        If you are a fan of Bollywood movies and sports dramas, you should definitely watch Dhan Dhana Dhan Goal and enjoy its video songs. You can download them for free from the links below:

        - -``` - -``` -

        Dhan Dhana Dhan Goal is not just a film about football, but also a film about the challenges and struggles of the South Asian community in Britain. The film portrays the racism and discrimination that the players face from the mainstream society, and how they overcome it with their passion and unity. The film also explores the personal lives and relationships of the characters, and how they balance their dreams and responsibilities.

        -

        The film received mixed reviews from critics and audiences, but was praised for its performances and music. John Abraham was lauded for his portrayal of Jaidev, the captain and star striker of the team, who has to deal with his father's disapproval and his love interest's expectations. Bipasha Basu played the role of Rumana, a successful TV reporter and Jaidev's girlfriend, who supports him in his career but also wants him to settle down. Arshad Warsi played the role of Shaan, the coach and manager of the team, who has a witty and sarcastic personality. Boman Irani played the role of Tony Singh, the owner of the club and a former footballer, who has a heart of gold and a passion for the game.

        -

        Dhan Dhana Dhan Goal is a film that celebrates the spirit of sportsmanship and teamwork, and shows how football can bring people together and change their lives. The film is a must-watch for anyone who loves sports, music and drama.

        -

-

        -
        -
        \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/packaging/markers.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/packaging/markers.py deleted file mode 100644 index 540e7a4dc79d02a820e291b57c43335d5aa25a41..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/packaging/markers.py +++ /dev/null @@ -1,304 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import operator -import os -import platform -import sys -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from pip._vendor.pyparsing import ( # noqa: N817 - Forward, - Group, - Literal as L, - ParseException, - ParseResults, - QuotedString, - ZeroOrMore, - stringEnd, - stringStart, -) - -from .specifiers import InvalidSpecifier, Specifier - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - -Operator = Callable[[str, str], bool] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. - """ - - -class Node: - def __init__(self, value: Any) -> None: - self.value = value - - def __str__(self) -> str: - return str(self.value) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}('{self}')>" - - def serialize(self) -> str: - raise NotImplementedError - - -class Variable(Node): - def serialize(self) -> str: - return str(self) - - -class Value(Node): - def serialize(self) -> str: - return f'"{self}"' - - -class Op(Node): - def serialize(self) -> str: - return str(self) - - -VARIABLE = ( - L("implementation_version") - | L("platform_python_implementation") - | L("implementation_name") - | L("python_full_version") - | L("platform_release") - | L("platform_version") - | L("platform_machine") - | L("platform_system") - | L("python_version") - | L("sys_platform") - | L("os_name") - | L("os.name") # PEP-345 - | L("sys.platform") # PEP-345 - | L("platform.version") # PEP-345 - | L("platform.machine") # PEP-345 - | L("platform.python_implementation") # PEP-345 - | L("python_implementation") # undocumented setuptools legacy - | L("extra") # PEP-508 -) -ALIASES = { - "os.name": "os_name", - "sys.platform": "sys_platform", - "platform.version": "platform_version", - "platform.machine": "platform_machine", - "platform.python_implementation": "platform_python_implementation", - "python_implementation": "platform_python_implementation", -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, 
t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker( - marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True -) -> str: - - assert isinstance(marker, (list, tuple, str)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. - if ( - isinstance(marker, list) - and len(marker) == 1 - and isinstance(marker[0], (list, tuple)) - ): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators: Dict[str, Operator] = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs: str, op: Op, rhs: str) -> bool: - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper: Optional[Operator] = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") - - return oper(lhs, rhs) - - -class Undefined: - pass - - -_undefined = Undefined() - - -def _get_env(environment: Dict[str, str], name: str) -> str: - value: Union[str, Undefined] = environment.get(name, _undefined) - - if isinstance(value, Undefined): - raise UndefinedEnvironmentName( - f"{name!r} does not exist in evaluation environment." 
- ) - - return value - - -def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: - groups: List[List[bool]] = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, str)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info: "sys._version_info") -> str: - version = "{0.major}.{0.minor}.{0.micro}".format(info) - kind = info.releaselevel - if kind != "final": - version += kind[0] + str(info.serial) - return version - - -def default_environment() -> Dict[str, str]: - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": ".".join(platform.python_version_tuple()[:2]), - "sys_platform": sys.platform, - } - - -class Marker: - def __init__(self, marker: str) -> None: - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - raise InvalidMarker( - f"Invalid marker: {marker!r}, parse error at " - f"{marker[e.loc : e.loc + 8]!r}" - ) - - def __str__(self) -> str: - return _format_marker(self._markers) - - def __repr__(self) -> str: - return f"" - - def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. 
- """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/spaces/tomofi/MMOCR/configs/_base_/recog_models/nrtr_modality_transform.py b/spaces/tomofi/MMOCR/configs/_base_/recog_models/nrtr_modality_transform.py deleted file mode 100644 index 3c2e87f4318959d3fb6c1c84c11360ff3dbd4eb1..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/configs/_base_/recog_models/nrtr_modality_transform.py +++ /dev/null @@ -1,11 +0,0 @@ -label_convertor = dict( - type='AttnConvertor', dict_type='DICT36', with_unknown=True, lower=True) - -model = dict( - type='NRTR', - backbone=dict(type='NRTRModalityTransform'), - encoder=dict(type='NRTREncoder', n_layers=12), - decoder=dict(type='NRTRDecoder'), - loss=dict(type='TFLoss'), - label_convertor=label_convertor, - max_seq_len=40) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/cascade_rcnn/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/cascade_rcnn/README.md deleted file mode 100644 index 4b1b4c87fd9429b3ef5207422b79741c3f670460..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/cascade_rcnn/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Cascade R-CNN: High Quality Object Detection and Instance Segmentation - -## Introduction - - - -```latex -@article{Cai_2019, - title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation}, - ISSN={1939-3539}, - url={http://dx.doi.org/10.1109/tpami.2019.2956516}, - DOI={10.1109/tpami.2019.2956516}, - journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher={Institute of Electrical and Electronics Engineers (IEEE)}, - author={Cai, Zhaowei and Vasconcelos, Nuno}, - year={2019}, - pages={1–1} -} -``` - -## Results and models - -### Cascade R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: |:------:|:--------:| -| R-50-FPN | caffe | 1x | 4.2 | | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_20200504_174853.log.json) | -| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316_214748.log.json) | -| R-50-FPN | pytorch | 20e | - | - | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_20200504_175131.log.json) | -| R-101-FPN | caffe | 1x | 6.2 | | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_20200504_175649.log.json) | -| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317_101744.log.json) | -| R-101-FPN | pytorch | 20e | - | - | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_20200504_231812.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 10.9 | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316_055608.log.json) | -| X-101-32x4d-FPN | pytorch | 20e | 7.6 | | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.7 | | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702.log.json) | -| X-101-64x4d-FPN | pytorch | 20e | 10.7 | | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) 
| [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357.log.json)| - -### Cascade Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: | -| R-50-FPN | caffe | 1x | 5.9 | | 41.2 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_20200504_174659.log.json) | -| R-50-FPN | pytorch | 1x | 6.0 | 11.2 | 41.2 | 35.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203_170449.log.json) | -| R-50-FPN | pytorch | 20e | - | - | 41.9 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_20200504_174711.log.json)| -| R-101-FPN | caffe | 1x | 7.8 | | 43.2 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_20200504_174813.log.json)| -| R-101-FPN | pytorch | 1x | 7.9 | 9.8 | 42.9 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203_092521.log.json) | -| R-101-FPN | pytorch | 20e | - | - | 43.4 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_20200504_174836.log.json)| -| X-101-32x4d-FPN | pytorch | 1x | 9.2 | 8.6 | 44.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201_052416.log.json) | -| X-101-32x4d-FPN | pytorch | 20e | 9.2 | - | 45.0 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 12.2 | 6.7 | 45.3 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203_044059.log.json) | -| X-101-64x4d-FPN | pytorch | 20e | 12.2 | | 45.6 |39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033.log.json)| - -**Notes:** - -- The `20e` schedule in Cascade (Mask) R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs. 
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py deleted file mode 100644 index 14eaef2dffea606027001b69d12d11cb46693e1c..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_caffe_dc5.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 781dba78d68e77fa7eee15f5bbcc539731f8378d..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py deleted file mode 100644 index b9e5524a6d8352201ae24b57560437b93de2ae80..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch')) -data = dict(samples_per_gpu=1, workers_per_gpu=1) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py deleted file mode 100644 index 8a2ef260bac24c2a6a849b2492e438d317acf355..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py +++ /dev/null @@ -1,79 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -cudnn_benchmark = True -# model settings -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - type='RetinaNet', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch'), - neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg), - bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), - # training and testing settings - train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=(640, 640), - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(640, 640)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=(640, 640)), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(640, 640), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=128), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='SGD', - lr=0.08, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - 
policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[30, 40]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=50) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_models/test_dense_heads/test_ga_anchor_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_models/test_dense_heads/test_ga_anchor_head.py deleted file mode 100644 index 4da346d35ea68e3100e0fd6a01183cf372a5dc21..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tests/test_models/test_dense_heads/test_ga_anchor_head.py +++ /dev/null @@ -1,90 +0,0 @@ -import mmcv -import torch - -from mmdet.models.dense_heads import GuidedAnchorHead - - -def test_ga_anchor_head_loss(): - """Tests anchor head loss when truth is empty and non-empty.""" - s = 256 - img_metas = [{ - 'img_shape': (s, s, 3), - 'scale_factor': 1, - 'pad_shape': (s, s, 3) - }] - - cfg = mmcv.Config( - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - center_ratio=0.2, - ignore_ratio=0.5, - pos_weight=-1, - debug=False)) - head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg) - - # Anchor head expects a multiple levels of features per image - if torch.cuda.is_available(): - head.cuda() - feat = [ - torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda() - for i in range(len(head.approx_anchor_generator.base_anchors)) - ] - cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat) - - # Test that empty ground truth encourages the network to predict - # background - gt_bboxes = [torch.empty((0, 4)).cuda()] - gt_labels = [torch.LongTensor([]).cuda()] - - gt_bboxes_ignore = None - - empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds, - loc_preds, gt_bboxes, gt_labels, img_metas, - gt_bboxes_ignore) - - # When there is no truth, the cls loss should be nonzero but there - # should be no box loss. 
- empty_cls_loss = sum(empty_gt_losses['loss_cls']) - empty_box_loss = sum(empty_gt_losses['loss_bbox']) - assert empty_cls_loss.item() > 0, 'cls loss should be non-zero' - assert empty_box_loss.item() == 0, ( - 'there should be no box loss when there are no true boxes') - - # When truth is non-empty then both cls and box loss should be nonzero - # for random inputs - gt_bboxes = [ - torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(), - ] - gt_labels = [torch.LongTensor([2]).cuda()] - one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds, - loc_preds, gt_bboxes, gt_labels, img_metas, - gt_bboxes_ignore) - onegt_cls_loss = sum(one_gt_losses['loss_cls']) - onegt_box_loss = sum(one_gt_losses['loss_bbox']) - assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero' - assert onegt_box_loss.item() > 0, 'box loss should be non-zero' diff --git a/spaces/ttt246/brain/Brain/src/router/__init__.py b/spaces/ttt246/brain/Brain/src/router/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/typesdigital/BLOOMChat/style.css b/spaces/typesdigital/BLOOMChat/style.css deleted file mode 100644 index 00901d44b3146e9cfb8b309be8c76473bf1b3b33..0000000000000000000000000000000000000000 --- a/spaces/typesdigital/BLOOMChat/style.css +++ /dev/null @@ -1,8 +0,0 @@ -body { - padding: 0; - margin: 0; -} - -iframe { - width:100vw;height:100vh;border:0; -} diff --git a/spaces/unilux/ASR_for_Luxembourgish_w2v/README.md b/spaces/unilux/ASR_for_Luxembourgish_w2v/README.md deleted file mode 100644 index 08937df4c60c57afe16f4e3ce0c1229b439d2d11..0000000000000000000000000000000000000000 --- a/spaces/unilux/ASR_for_Luxembourgish_w2v/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ASR for Luxembourgish w\ wav2vec2 -emoji: 🏃 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: unilux/ASR_for_Luxembourgish ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/AutoCAD Mechanical 2016 Keygen Only Xforce 3 Rar.md b/spaces/usbethFlerru/sovits-modelsV2/example/AutoCAD Mechanical 2016 Keygen Only Xforce 3 Rar.md deleted file mode 100644 index 2c85ce1a20535f84375d36783b61dc6035543de4..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/AutoCAD Mechanical 2016 Keygen Only Xforce 3 Rar.md +++ /dev/null @@ -1,11 +0,0 @@ -
        -

Request codes are necessary only if you have perpetual-license software and need an activation code to activate it manually on a computer with no internet access. Generating a request code is the first step in manually activating your Autodesk software.

        -

        AutoCAD Mechanical 2016 keygen only xforce 3 rar


        Download Filehttps://urlcod.com/2uyVkJ



        -

In this video I'm going to show you how to activate your AutoCAD 2016 software on an unsupported computer. You can find the activation screens for AutoCAD 2016 in the Product Activation Wizard. In the wizard, select the following option to activate AutoCAD 2016 on an unsupported computer:

        -

You can activate AutoCAD 2016, or another license, with a code on the computer where you installed the software. If you have a printer, you can print the activation code by following the instructions printed on the receipt for the software.

        -

If your serial number is not stored on the computer where the software is installed, you'll need to activate your copy of the software through the website you are invited to access when you purchase a license.

        -

        -

This section includes the actual activation instructions for AutoCAD 2016. Read them carefully: you must follow the instructions exactly, or the activation process may fail. Visit the Autodesk AutoCAD 2016 online support pages for more information about activation.

        -

First, install the update according to the instructions that ship with the software; you may need an internet connection. Before installing the update, back up your installed.ini file. In the event of an installation issue, you may need to uninstall and then reinstall the previous version of the software. To help you locate and uninstall the previous version, AutoCAD 2016 Help and Support will ask you to save a copy of the installed program and then proceed to uninstall the software.

        -
        -
        \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/CoursInstallationCameraDeSurveillancepdf Learn How to Install and Maintain Security Cameras.md b/spaces/usbethFlerru/sovits-modelsV2/example/CoursInstallationCameraDeSurveillancepdf Learn How to Install and Maintain Security Cameras.md deleted file mode 100644 index 0e6d96e1dcbc6a9ea21ecaab1293f0307e6a4315..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/CoursInstallationCameraDeSurveillancepdf Learn How to Install and Maintain Security Cameras.md +++ /dev/null @@ -1,6 +0,0 @@ -

        CoursInstallationCameraDeSurveillancepdf


        DOWNLOADhttps://urlcod.com/2uyUTg



        -
        -
        -

        diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/test_time_augmentation.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/test_time_augmentation.md deleted file mode 100644 index 577c5ddc1f85cda2eb3851dabae8b2158f235c72..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/yolov5/tutorials/test_time_augmentation.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -comments: true -description: Learn how to use Test Time Augmentation (TTA) with YOLOv5 to improve mAP and Recall during testing and inference. Code examples included. -keywords: YOLOv5, test time augmentation, TTA, mAP, recall, object detection, deep learning, computer vision, PyTorch ---- - -# Test-Time Augmentation (TTA) - -📚 This guide explains how to use Test Time Augmentation (TTA) during testing and inference for improved mAP and Recall with YOLOv5 🚀. -UPDATED 25 September 2022. - -## Before You Start - -Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a [**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). - -```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install -``` - -## Test Normally - -Before trying TTA we want to establish a baseline performance to compare to. This command tests YOLOv5x on COCO val2017 at image size 640 pixels. `yolov5x.pt` is the largest and most accurate model available. Other options are `yolov5s.pt`, `yolov5m.pt` and `yolov5l.pt`, or you own checkpoint from training a custom dataset `./weights/best.pt`. For details on all available models please see our README [table](https://github.com/ultralytics/yolov5#pretrained-checkpoints). - -```bash -python val.py --weights yolov5x.pt --data coco.yaml --img 640 --half -``` - -Output: - -```shell -val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True -YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) - -Fusing layers... -Model Summary: 476 layers, 87730285 parameters, 0 gradients - -val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2846.03it/s] -val: New cache created: ../datasets/coco/val2017.cache - Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [02:30<00:00, 1.05it/s] - all 5000 36335 0.746 0.626 0.68 0.49 -Speed: 0.1ms pre-process, 22.4ms inference, 1.4ms NMS per image at shape (32, 3, 640, 640) # <--- baseline speed - -Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json... -... 
- Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504 # <--- baseline mAP - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546 - Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681 # <--- baseline mAR - Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524 - Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826 -``` - -## Test with TTA - -Append `--augment` to any existing `val.py` command to enable TTA, and increase the image size by about 30% for improved results. Note that inference with TTA enabled will typically take about 2-3X the time of normal inference, as the images are left-right flipped and processed at 3 different resolutions, with the outputs merged before NMS. Part of the speed decrease is simply due to larger image sizes (832 vs 640), while part is due to the actual TTA operations. - -```bash -python val.py --weights yolov5x.pt --data coco.yaml --img 832 --augment --half -``` - -Output: - -```shell -val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=832, conf_thres=0.001, iou_thres=0.6, task=val, device=, single_cls=False, augment=True, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True -YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) - -Fusing layers... -/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.) - return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode) -Model Summary: 476 layers, 87730285 parameters, 0 gradients -val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2885.61it/s] -val: New cache created: ../datasets/coco/val2017.cache - Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [07:29<00:00, 2.86s/it] - all 5000 36335 0.718 0.656 0.695 0.503 -Speed: 0.2ms pre-process, 80.6ms inference, 2.7ms NMS per image at shape (32, 3, 832, 832) # <--- TTA speed - -Evaluating pycocotools mAP... saving runs/val/exp2/yolov5x_predictions.json... -... - Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.516 # <--- TTA mAP - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.701 - Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.562 - Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.361 - Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.564 - Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.656 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.388 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.640 - Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.696 # <--- TTA mAR - Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.553 - Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.744 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.833 -```
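To make the merge step concrete, here is a minimal PyTorch-style sketch of the idea behind TTA: run the model on left-right flipped and rescaled copies of each image, map the boxes from every view back to the original coordinates, and concatenate the results before a single NMS pass. This is an illustration only, not YOLOv5's actual implementation (that lives in `forward_augment()`, linked under Customize below); `model` and its `(N, 6)` output format are assumptions.

```python
import torch
import torch.nn.functional as F

def tta_inference(model, img, scales=(1.0, 0.83, 0.67)):
    """Illustrative TTA. Assumes model(x) returns an (N, 6) tensor of
    [x1, y1, x2, y2, conf, cls] boxes in the coordinates of its input."""
    views = []
    for s in scales:
        for flip in (False, True):
            x = F.interpolate(img, scale_factor=s, mode='bilinear', align_corners=False)
            if flip:
                x = torch.flip(x, dims=[3])  # left-right flip
            boxes = model(x).clone()
            if flip:  # map x coordinates back to the unflipped frame
                boxes[:, [0, 2]] = x.shape[3] - boxes[:, [2, 0]]
            boxes[:, :4] /= s  # undo the resize
            views.append(boxes)
    return torch.cat(views, 0)  # merged predictions; NMS runs on this tensor
```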
- -## Inference with TTA - -`detect.py` TTA inference operates identically to `val.py` TTA: simply append `--augment` to any existing `detect.py` command: - -```bash -python detect.py --weights yolov5s.pt --img 832 --source data/images --augment -``` - -Output: - -```bash -detect: weights=['yolov5s.pt'], source=data/images, imgsz=832, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=True, update=False, project=runs/detect, name=exp, exist_ok=False, line_width=3, hide_labels=False, hide_conf=False, half=False -YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) - -Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt... -100% 14.1M/14.1M [00:00<00:00, 81.9MB/s] - -Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients -image 1/2 /content/yolov5/data/images/bus.jpg: 832x640 4 persons, 1 bus, 1 fire hydrant, Done. (0.029s) -image 2/2 /content/yolov5/data/images/zidane.jpg: 480x832 3 persons, 3 ties, Done. (0.024s) -Results saved to runs/detect/exp -Done. (0.156s) -``` - - - -### PyTorch Hub TTA - -TTA is automatically integrated into all [YOLOv5 PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5) models, and can be accessed by passing `augment=True` at inference time. - -```python -import torch - -# Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5x, custom - -# Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, PIL, OpenCV, numpy, multiple - -# Inference -results = model(img, augment=True) # <--- TTA inference - -# Results -results.print() # or .show(), .save(), .crop(), .pandas(), etc. -``` - -### Customize - -You can customize the TTA ops applied in the YOLOv5 `forward_augment()` method [here](https://github.com/ultralytics/yolov5/blob/8c6f9e15bfc0000d18b976a95b9d7c17d407ec91/models/yolo.py#L125-L137). - -## Environments - -YOLOv5 is designed to be run in the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) -- **Amazon** Deep Learning AMI.
See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) -- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls - -## Status - -YOLOv5 CI - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/examples/YOLOv8-CPP-Inference/inference.cpp b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/examples/YOLOv8-CPP-Inference/inference.cpp deleted file mode 100644 index 12c26079bcbf1b69b92e2305830dce2474a37288..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/examples/YOLOv8-CPP-Inference/inference.cpp +++ /dev/null @@ -1,185 +0,0 @@ -#include "inference.h" - -Inference::Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda) -{ - modelPath = onnxModelPath; - modelShape = modelInputShape; - classesPath = classesTxtFile; - cudaEnabled = runWithCuda; - - loadOnnxNetwork(); - // loadClassesFromFile(); The classes are hard-coded for this example -} - -std::vector<Detection> Inference::runInference(const cv::Mat &input) -{ - cv::Mat modelInput = input; - if (letterBoxForSquare && modelShape.width == modelShape.height) - modelInput = formatToSquare(modelInput); - - cv::Mat blob; - cv::dnn::blobFromImage(modelInput, blob, 1.0/255.0, modelShape, cv::Scalar(), true, false); - net.setInput(blob); - - std::vector<cv::Mat> outputs; - net.forward(outputs, net.getUnconnectedOutLayersNames()); - - int rows = outputs[0].size[1]; - int dimensions = outputs[0].size[2]; - - bool yolov8 = false; - // yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c]) - // yolov8 has an output of shape (batchSize, 84, 8400) (Num classes + box[x,y,w,h]) - if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8) - { - yolov8 = true; - rows = outputs[0].size[2]; - dimensions = outputs[0].size[1]; - - outputs[0] = outputs[0].reshape(1, dimensions); - cv::transpose(outputs[0], outputs[0]); - } - float *data = (float *)outputs[0].data; - - float x_factor = modelInput.cols / (float)modelShape.width; // cast avoids integer division - float y_factor = modelInput.rows / (float)modelShape.height; - - std::vector<int> class_ids; - std::vector<float> confidences; - std::vector<cv::Rect> boxes; - - for (int i = 0; i < rows; ++i) - { - if (yolov8) - { - float *classes_scores = data+4; - - cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores); - cv::Point class_id; - double maxClassScore; - - minMaxLoc(scores, 0, &maxClassScore, 0, &class_id); - - if (maxClassScore > modelScoreThreshold) - { - confidences.push_back(maxClassScore); - class_ids.push_back(class_id.x); - - float x = data[0]; - float y = data[1]; - float w = data[2]; - float h = data[3]; - - int left = int((x - 0.5 * w) * x_factor); - int top = int((y - 0.5 * h) * y_factor); - - int width = int(w * x_factor); - int height = int(h * y_factor); - - boxes.push_back(cv::Rect(left, top, width, height)); - } - } - else // yolov5 - { - float confidence = data[4]; - - if (confidence >= modelConfidenceThreshold) - { - float *classes_scores = data+5; - - cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores); - cv::Point class_id; - double max_class_score; - - minMaxLoc(scores, 0, &max_class_score, 0, &class_id); - - if (max_class_score > modelScoreThreshold) - { - confidences.push_back(confidence); - class_ids.push_back(class_id.x); - - float x = data[0]; - float y = data[1]; - float w = data[2]; - float h = data[3]; - - int left = int((x - 0.5 * w) * x_factor); - int top = int((y - 0.5 * h) * y_factor); - - int width = int(w * x_factor); - int height = int(h * y_factor); - - boxes.push_back(cv::Rect(left, top, width, height)); - } - } - } - - data += dimensions; - } - - std::vector<int> nms_result; - cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result); - - std::vector<Detection> detections{}; - for (unsigned long i = 0; i < nms_result.size(); ++i) - { - int idx = nms_result[i]; - - Detection result; - result.class_id = class_ids[idx]; - result.confidence = confidences[idx]; - - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution<int> dis(100, 255); - result.color = cv::Scalar(dis(gen), - dis(gen), - dis(gen)); - - result.className = classes[result.class_id]; - result.box = boxes[idx]; - - detections.push_back(result); - } - - return detections; -}
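The post-processing above is easier to scan in Python. The sketch below mirrors the YOLOv8 branch of `runInference` (transpose to one row per candidate, take the class-score argmax, decode the center-format box, then one NMS pass) using OpenCV's Python bindings. The function name, thresholds, and the assumed `(1, 4 + num_classes, 8400)` output shape are illustrative, not part of this file.

```python
import cv2
import numpy as np

def postprocess_yolov8(output, img_w, img_h, input_size=640,
                       score_thres=0.45, nms_thres=0.50):
    """Decode a YOLOv8 ONNX output of assumed shape (1, 4 + num_classes, 8400)."""
    preds = output[0].T                     # -> (8400, 4 + num_classes), one row per candidate
    class_ids = preds[:, 4:].argmax(axis=1)
    scores = preds[:, 4:].max(axis=1)
    keep = scores > score_thres
    preds, class_ids, scores = preds[keep], class_ids[keep], scores[keep]

    # Map center-format boxes back to original-image pixel coordinates
    x_factor, y_factor = img_w / input_size, img_h / input_size
    boxes = [[int((cx - 0.5 * w) * x_factor), int((cy - 0.5 * h) * y_factor),
              int(w * x_factor), int(h * y_factor)]
             for cx, cy, w, h in preds[:, :4]]

    idxs = cv2.dnn.NMSBoxes(boxes, scores.tolist(), score_thres, nms_thres)
    return [(int(class_ids[i]), float(scores[i]), boxes[i]) for i in np.array(idxs).flatten()]
```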
 - -void Inference::loadClassesFromFile() -{ - std::ifstream inputFile(classesPath); - if (inputFile.is_open()) - { - std::string classLine; - while (std::getline(inputFile, classLine)) - classes.push_back(classLine); - inputFile.close(); - } -} - -void Inference::loadOnnxNetwork() -{ - net = cv::dnn::readNetFromONNX(modelPath); - if (cudaEnabled) - { - std::cout << "\nRunning on CUDA" << std::endl; - net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA); - net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); - } - else - { - std::cout << "\nRunning on CPU" << std::endl; - net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV); - net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU); - } -} - -cv::Mat Inference::formatToSquare(const cv::Mat &source) -{ - int col = source.cols; - int row = source.rows; - int _max = MAX(col, row); - cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3); - source.copyTo(result(cv::Rect(0, 0, col, row))); - return result; -} diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/utils/__init__.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/utils/__init__.py deleted file mode 100644 index bcfce51eb93e38843a9843f3daeb8f4085300ba9..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/yolo/utils/__init__.py +++ /dev/null @@ -1,779 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -import contextlib -import inspect -import logging.config -import os -import platform -import re -import subprocess -import sys -import threading -import urllib -import uuid -from pathlib import Path -from types import SimpleNamespace -from typing import Union - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -import yaml - -from ultralytics import __version__ - -# PyTorch Multi-GPU DDP Constants -RANK = int(os.getenv('RANK', -1)) -LOCAL_RANK = 
int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) - -# Other Constants -FILE = Path(__file__).resolve() -ROOT = FILE.parents[2] # YOLO -DEFAULT_CFG_PATH = ROOT / 'yolo/cfg/default.yaml' -NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode -VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format -LOGGING_NAME = 'ultralytics' -MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans -ARM64 = platform.machine() in ('arm64', 'aarch64') # ARM64 booleans -HELP_MSG = \ - """ - Usage examples for running YOLOv8: - - 1. Install the ultralytics package: - - pip install ultralytics - - 2. Use the Python SDK: - - from ultralytics import YOLO - - # Load a model - model = YOLO('yolov8n.yaml') # build a new model from scratch - model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) - - # Use the model - results = model.train(data="coco128.yaml", epochs=3) # train the model - results = model.val() # evaluate model performance on the validation set - results = model('https://ultralytics.com/images/bus.jpg') # predict on an image - success = model.export(format='onnx') # export the model to ONNX format - - 3. Use the command line interface (CLI): - - YOLOv8 'yolo' CLI commands use the following syntax: - - yolo TASK MODE ARGS - - Where TASK (optional) is one of [detect, segment, classify] - MODE (required) is one of [train, val, predict, export] - ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. 
- See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg' - - - Train a detection model for 10 epochs with an initial learning_rate of 0.01 - yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 - - - Predict a YouTube video using a pretrained segmentation model at image size 320: - yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 - - - Val a pretrained detection model at batch-size 1 and image size 640: - yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 - - - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) - yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 - - - Run special commands: - yolo help - yolo checks - yolo version - yolo settings - yolo copy-cfg - yolo cfg - - Docs: https://docs.ultralytics.com - Community: https://community.ultralytics.com - GitHub: https://github.com/ultralytics/ultralytics - """ - -# Settings -torch.set_printoptions(linewidth=320, precision=4, profile='default') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads -os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab - - -class SimpleClass: - """ - Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute - access methods for easier debugging and usage. - """ - - def __str__(self): - """Return a human-readable string representation of the object.""" - attr = [] - for a in dir(self): - v = getattr(self, a) - if not callable(v) and not a.startswith('_'): - if isinstance(v, SimpleClass): - # Display only the module and class name for subclasses - s = f'{a}: {v.__module__}.{v.__class__.__name__} object' - else: - s = f'{a}: {repr(v)}' - attr.append(s) - return f'{self.__module__}.{self.__class__.__name__} object with attributes:\n\n' + '\n'.join(attr) - - def __repr__(self): - """Return a machine-readable string representation of the object.""" - return self.__str__() - - def __getattr__(self, attr): - """Custom attribute access error message with helpful information.""" - name = self.__class__.__name__ - raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") - - -class IterableSimpleNamespace(SimpleNamespace): - """ - Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and - enables usage with dict() and for loops. - """ - - def __iter__(self): - """Return an iterator of key-value pairs from the namespace's attributes.""" - return iter(vars(self).items()) - - def __str__(self): - """Return a human-readable string representation of the object.""" - return '\n'.join(f'{k}={v}' for k, v in vars(self).items()) - - def __getattr__(self, attr): - """Custom attribute access error message with helpful information.""" - name = self.__class__.__name__ - raise AttributeError(f""" - '{name}' object has no attribute '{attr}'. 
This may be caused by a modified or out of date ultralytics - 'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace - {DEFAULT_CFG_PATH} with the latest version from - https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml - """) - - def get(self, key, default=None): - """Return the value of the specified key if it exists; otherwise, return the default value.""" - return getattr(self, key, default) - - -def plt_settings(rcparams=None, backend='Agg'): - """ - Decorator to temporarily set rc parameters and the backend for a plotting function. - - Usage: - decorator: @plt_settings({"font.size": 12}) - context manager: with plt_settings({"font.size": 12}): - - Args: - rcparams (dict): Dictionary of rc parameters to set. - backend (str, optional): Name of the backend to use. Defaults to 'Agg'. - - Returns: - (Callable): Decorated function with temporarily set rc parameters and backend. This decorator can be - applied to any function that needs to have specific matplotlib rc parameters and backend for its execution. - """ - - if rcparams is None: - rcparams = {'font.size': 11} - - def decorator(func): - """Decorator to apply temporary rc parameters and backend to a function.""" - - def wrapper(*args, **kwargs): - """Sets rc parameters and backend, calls the original function, and restores the settings.""" - original_backend = plt.get_backend() - plt.switch_backend(backend) - - with plt.rc_context(rcparams): - result = func(*args, **kwargs) - - plt.switch_backend(original_backend) - return result - - return wrapper - - return decorator - - -def set_logging(name=LOGGING_NAME, verbose=True): - """Sets up logging for the given name.""" - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - logging.config.dictConfig({ - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - name: { - 'format': '%(message)s'}}, - 'handlers': { - name: { - 'class': 'logging.StreamHandler', - 'formatter': name, - 'level': level}}, - 'loggers': { - name: { - 'level': level, - 'handlers': [name], - 'propagate': False}}}) - - -def emojis(string=''): - """Return platform-dependent emoji-safe version of string.""" - return string.encode().decode('ascii', 'ignore') if WINDOWS else string - - -class EmojiFilter(logging.Filter): - """ - A custom logging filter class for removing emojis in log messages. - - This filter is particularly useful for ensuring compatibility with Windows terminals - that may not support the display of emojis in log messages. - """ - - def filter(self, record): - """Filter logs by emoji unicode characters on windows.""" - record.msg = emojis(record.msg) - return super().filter(record) - - -# Set logger -set_logging(LOGGING_NAME, verbose=VERBOSE) # run before defining LOGGER -LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) -if WINDOWS: # emoji-safe logging - LOGGER.addFilter(EmojiFilter()) - - -def yaml_save(file='data.yaml', data=None): - """ - Save YAML data to a file. - - Args: - file (str, optional): File name. Default is 'data.yaml'. - data (dict): Data to save in YAML format. - - Returns: - (None): Data is saved to the specified file. 
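 
    Example (illustrative values):
        >>> yaml_save('hyp.yaml', {'lr0': 0.01, 'momentum': 0.937})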
- """ - if data is None: - data = {} - file = Path(file) - if not file.parent.exists(): - # Create parent directories if they don't exist - file.parent.mkdir(parents=True, exist_ok=True) - - # Convert Path objects to strings - for k, v in data.items(): - if isinstance(v, Path): - data[k] = str(v) - - # Dump data to file in YAML format - with open(file, 'w') as f: - yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True) - - -def yaml_load(file='data.yaml', append_filename=False): - """ - Load YAML data from a file. - - Args: - file (str, optional): File name. Default is 'data.yaml'. - append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False. - - Returns: - (dict): YAML data and file name. - """ - with open(file, errors='ignore', encoding='utf-8') as f: - s = f.read() # string - - # Remove special characters - if not s.isprintable(): - s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s) - - # Add YAML filename to dict and return - return {**yaml.safe_load(s), 'yaml_file': str(file)} if append_filename else yaml.safe_load(s) - - -def yaml_print(yaml_file: Union[str, Path, dict]) -> None: - """ - Pretty prints a yaml file or a yaml-formatted dictionary. - - Args: - yaml_file: The file path of the yaml file or a yaml-formatted dictionary. - - Returns: - None - """ - yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file - dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True) - LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}") - - -# Default configuration -DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH) -for k, v in DEFAULT_CFG_DICT.items(): - if isinstance(v, str) and v.lower() == 'none': - DEFAULT_CFG_DICT[k] = None -DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys() -DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT) - - -def is_colab(): - """ - Check if the current script is running inside a Google Colab notebook. - - Returns: - (bool): True if running inside a Colab notebook, False otherwise. - """ - return 'COLAB_RELEASE_TAG' in os.environ or 'COLAB_BACKEND_VERSION' in os.environ - - -def is_kaggle(): - """ - Check if the current script is running inside a Kaggle kernel. - - Returns: - (bool): True if running inside a Kaggle kernel, False otherwise. - """ - return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' - - -def is_jupyter(): - """ - Check if the current script is running inside a Jupyter Notebook. - Verified on Colab, Jupyterlab, Kaggle, Paperspace. - - Returns: - (bool): True if running inside a Jupyter Notebook, False otherwise. - """ - with contextlib.suppress(Exception): - from IPython import get_ipython - return get_ipython() is not None - return False - - -def is_docker() -> bool: - """ - Determine if the script is running inside a Docker container. - - Returns: - (bool): True if the script is running inside a Docker container, False otherwise. - """ - file = Path('/proc/self/cgroup') - if file.exists(): - with open(file) as f: - return 'docker' in f.read() - else: - return False - - -def is_online() -> bool: - """ - Check internet connectivity by attempting to connect to a known online host. - - Returns: - (bool): True if connection is successful, False otherwise. 
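 
    Example:
        >>> ONLINE = is_online()  # this is how the module-level ONLINE constant below is set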
- """ - import socket - - for host in '1.1.1.1', '8.8.8.8', '223.5.5.5': # Cloudflare, Google, AliDNS: - try: - test_connection = socket.create_connection(address=(host, 53), timeout=2) - except (socket.timeout, socket.gaierror, OSError): - continue - else: - # If the connection was successful, close it to avoid a ResourceWarning - test_connection.close() - return True - return False - - -ONLINE = is_online() - - -def is_pip_package(filepath: str = __name__) -> bool: - """ - Determines if the file at the given filepath is part of a pip package. - - Args: - filepath (str): The filepath to check. - - Returns: - (bool): True if the file is part of a pip package, False otherwise. - """ - import importlib.util - - # Get the spec for the module - spec = importlib.util.find_spec(filepath) - - # Return whether the spec is not None and the origin is not None (indicating it is a package) - return spec is not None and spec.origin is not None - - -def is_dir_writeable(dir_path: Union[str, Path]) -> bool: - """ - Check if a directory is writeable. - - Args: - dir_path (str | Path): The path to the directory. - - Returns: - (bool): True if the directory is writeable, False otherwise. - """ - return os.access(str(dir_path), os.W_OK) - - -def is_pytest_running(): - """ - Determines whether pytest is currently running or not. - - Returns: - (bool): True if pytest is running, False otherwise. - """ - return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem) - - -def is_github_actions_ci() -> bool: - """ - Determine if the current environment is a GitHub Actions CI Python runner. - - Returns: - (bool): True if the current environment is a GitHub Actions CI Python runner, False otherwise. - """ - return 'GITHUB_ACTIONS' in os.environ and 'RUNNER_OS' in os.environ and 'RUNNER_TOOL_CACHE' in os.environ - - -def is_git_dir(): - """ - Determines whether the current file is part of a git repository. - If the current file is not part of a git repository, returns None. - - Returns: - (bool): True if current file is part of a git repository. - """ - return get_git_dir() is not None - - -def get_git_dir(): - """ - Determines whether the current file is part of a git repository and if so, returns the repository root directory. - If the current file is not part of a git repository, returns None. - - Returns: - (Path | None): Git root directory if found or None if not found. - """ - for d in Path(__file__).parents: - if (d / '.git').is_dir(): - return d - return None # no .git dir found - - -def get_git_origin_url(): - """ - Retrieves the origin URL of a git repository. - - Returns: - (str | None): The origin URL of the git repository. - """ - if is_git_dir(): - with contextlib.suppress(subprocess.CalledProcessError): - origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) - return origin.decode().strip() - return None # if not git dir or on error - - -def get_git_branch(): - """ - Returns the current git branch name. If not in a git repository, returns None. - - Returns: - (str | None): The current git branch name. - """ - if is_git_dir(): - with contextlib.suppress(subprocess.CalledProcessError): - origin = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) - return origin.decode().strip() - return None # if not git dir or on error - - -def get_default_args(func): - """Returns a dictionary of default arguments for a function. - - Args: - func (callable): The function to inspect. 
- - Returns: - (dict): A dictionary where each key is a parameter name, and each value is the default value of that parameter. - """ - signature = inspect.signature(func) - return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} - - -def get_user_config_dir(sub_dir='Ultralytics'): - """ - Get the user config directory. - - Args: - sub_dir (str): The name of the subdirectory to create. - - Returns: - (Path): The path to the user config directory. - """ - # Return the appropriate config directory for each operating system - if WINDOWS: - path = Path.home() / 'AppData' / 'Roaming' / sub_dir - elif MACOS: # macOS - path = Path.home() / 'Library' / 'Application Support' / sub_dir - elif LINUX: - path = Path.home() / '.config' / sub_dir - else: - raise ValueError(f'Unsupported operating system: {platform.system()}') - - # GCP and AWS lambda fix, only /tmp is writeable - if not is_dir_writeable(str(path.parent)): - path = Path('/tmp') / sub_dir - LOGGER.warning(f"WARNING ⚠️ user config directory is not writeable, defaulting to '{path}'.") - - # Create the subdirectory if it does not exist - path.mkdir(parents=True, exist_ok=True) - - return path - - -USER_CONFIG_DIR = Path(os.getenv('YOLO_CONFIG_DIR', get_user_config_dir())) # Ultralytics settings dir -SETTINGS_YAML = USER_CONFIG_DIR / 'settings.yaml' - - -def colorstr(*input): - """Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world').""" - *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - colors = { - 'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} - return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] - - -class TryExcept(contextlib.ContextDecorator): - """YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager.""" - - def __init__(self, msg='', verbose=True): - """Initialize TryExcept class with optional message and verbosity settings.""" - self.msg = msg - self.verbose = verbose - - def __enter__(self): - """Executes when entering TryExcept context, initializes instance.""" - pass - - def __exit__(self, exc_type, value, traceback): - """Defines behavior when exiting a 'with' block, prints error message if necessary.""" - if self.verbose and value: - print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) - return True - - -def threaded(func): - """Multi-threads a target function and returns thread. Usage: @threaded decorator.""" - - def wrapper(*args, **kwargs): - """Multi-threads a given function and returns the thread.""" - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - -def set_sentry(): - """ - Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and - sync=True in settings. Run 'yolo settings' to see and update settings YAML file. 
- - Conditions required to send errors (ALL conditions must be met or no errors will be reported): - - sentry_sdk package is installed - - sync=True in YOLO settings - - pytest is not running - - running in a pip package installation - - running in a non-git directory - - running with rank -1 or 0 - - online environment - - CLI used to run package (checked with 'yolo' as the name of the main CLI command) - - The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError - exceptions and to exclude events with 'out of memory' in their exception message. - - Additionally, the function sets custom tags and user information for Sentry events. - """ - - def before_send(event, hint): - """ - Modify the event before sending it to Sentry based on specific exception types and messages. - - Args: - event (dict): The event dictionary containing information about the error. - hint (dict): A dictionary containing additional information about the error. - - Returns: - dict: The modified event or None if the event should not be sent to Sentry. - """ - if 'exc_info' in hint: - exc_type, exc_value, tb = hint['exc_info'] - if exc_type in (KeyboardInterrupt, FileNotFoundError) \ - or 'out of memory' in str(exc_value): - return None # do not send event - - event['tags'] = { - 'sys_argv': sys.argv[0], - 'sys_argv_name': Path(sys.argv[0]).name, - 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', - 'os': ENVIRONMENT} - return event - - if SETTINGS['sync'] and \ - RANK in (-1, 0) and \ - Path(sys.argv[0]).name == 'yolo' and \ - not TESTS_RUNNING and \ - ONLINE and \ - is_pip_package() and \ - not is_git_dir(): - - # If sentry_sdk package is not installed then return and do not use Sentry - try: - import sentry_sdk # noqa - except ImportError: - return - - sentry_sdk.init( - dsn='https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016', - debug=False, - traces_sample_rate=1.0, - release=__version__, - environment='production', # 'dev' or 'production' - before_send=before_send, - ignore_errors=[KeyboardInterrupt, FileNotFoundError]) - sentry_sdk.set_user({'id': SETTINGS['uuid']}) # SHA-256 anonymized UUID hash - - # Disable all sentry logging - for logger in 'sentry_sdk', 'sentry_sdk.errors': - logging.getLogger(logger).setLevel(logging.CRITICAL) - - -def get_settings(file=SETTINGS_YAML, version='0.0.3'): - """ - Loads a global Ultralytics settings YAML file or creates one with default values if it does not exist. - - Args: - file (Path): Path to the Ultralytics settings YAML file. Defaults to 'settings.yaml' in the USER_CONFIG_DIR. - version (str): Settings version. If min settings version not met, new default settings will be saved. - - Returns: - (dict): Dictionary of settings key-value pairs. - """ - import hashlib - - from ultralytics.yolo.utils.checks import check_version - from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first - - git_dir = get_git_dir() - root = git_dir or Path() - datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve() - defaults = { - 'datasets_dir': str(datasets_root / 'datasets'), # default datasets directory. - 'weights_dir': str(root / 'weights'), # default weights directory. - 'runs_dir': str(root / 'runs'), # default runs directory. 
- 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), # SHA-256 anonymized UUID hash - 'sync': True, # sync analytics to help with YOLO development - 'api_key': '', # Ultralytics HUB API key (https://hub.ultralytics.com/) - 'settings_version': version} # Ultralytics settings version - - with torch_distributed_zero_first(RANK): - if not file.exists(): - yaml_save(file, defaults) - settings = yaml_load(file) - - # Check that settings keys and types match defaults - correct = \ - settings \ - and settings.keys() == defaults.keys() \ - and all(type(a) == type(b) for a, b in zip(settings.values(), defaults.values())) \ - and check_version(settings['settings_version'], version) - if not correct: - LOGGER.warning('WARNING ⚠️ Ultralytics settings reset to defaults. This is normal and may be due to a ' - 'recent ultralytics package update, but may have overwritten previous settings. ' - f"\nView and update settings with 'yolo settings' or at '{file}'") - settings = defaults # merge **defaults with **settings (prefer **settings) - yaml_save(file, settings) # save updated defaults - - return settings - - -def set_settings(kwargs, file=SETTINGS_YAML): - """ - Function that runs on a first-time ultralytics package installation to set up global settings and create necessary - directories. - """ - SETTINGS.update(kwargs) - yaml_save(file, SETTINGS) - - -def deprecation_warn(arg, new_arg, version=None): - """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument.""" - if not version: - version = float(__version__[:3]) + 0.2 # deprecate after 2nd major release - LOGGER.warning(f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. " - f"Please use '{new_arg}' instead.") - - -def clean_url(url): - """Strip auth from URL, i.e. https://url.com/file.txt?auth -> https://url.com/file.txt.""" - url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - return urllib.parse.unquote(url).split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth - - -def url2file(url): - """Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt.""" - return Path(clean_url(url)).name - - -# Run below code on yolo/utils init ------------------------------------------------------------------------------------ - -# Check first-install steps -PREFIX = colorstr('Ultralytics: ') -SETTINGS = get_settings() -DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory -ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \ - 'Docker' if is_docker() else platform.system() -TESTS_RUNNING = is_pytest_running() or is_github_actions_ci() -set_sentry() - -# Apply monkey patches if the script is being run from within the parent directory of the script's location -from .patches import imread, imshow, imwrite - -# torch.save = torch_save -if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename: - cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow diff --git a/spaces/valhalla/glide-text2im/glide_text2im/xf.py b/spaces/valhalla/glide-text2im/glide_text2im/xf.py deleted file mode 100644 index 5dfff440b489f3cc3c62450dc28c2f35f692dd94..0000000000000000000000000000000000000000 --- a/spaces/valhalla/glide-text2im/glide_text2im/xf.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -Transformer implementation adapted from CLIP ViT: -https://github.com/openai/CLIP/blob/4c0275784d6d9da97ca1f47eaaee31de1867da91/clip/model.py -""" - -import math - -import torch as th -import torch.nn as nn - - -def convert_module_to_f16(l): - """ - Convert primitive modules to float16. - """ - if isinstance(l, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): - l.weight.data = l.weight.data.half() - if l.bias is not None: - l.bias.data = l.bias.data.half() - - -class LayerNorm(nn.LayerNorm): - """ - Implementation that supports fp16 inputs but fp32 gains/biases. 
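 
    The forward pass upcasts the input to float32, applies the normalization in
    full precision, and casts the result back to the input's original dtype.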
- """ - - def forward(self, x: th.Tensor): - return super().forward(x.float()).to(x.dtype) - - -class MultiheadAttention(nn.Module): - def __init__(self, n_ctx, width, heads): - super().__init__() - self.n_ctx = n_ctx - self.width = width - self.heads = heads - self.c_qkv = nn.Linear(width, width * 3) - self.c_proj = nn.Linear(width, width) - self.attention = QKVMultiheadAttention(heads, n_ctx) - - def forward(self, x): - x = self.c_qkv(x) - x = self.attention(x) - x = self.c_proj(x) - return x - - -class MLP(nn.Module): - def __init__(self, width): - super().__init__() - self.width = width - self.c_fc = nn.Linear(width, width * 4) - self.c_proj = nn.Linear(width * 4, width) - self.gelu = nn.GELU() - - def forward(self, x): - return self.c_proj(self.gelu(self.c_fc(x))) - - -class QKVMultiheadAttention(nn.Module): - def __init__(self, n_heads: int, n_ctx: int): - super().__init__() - self.n_heads = n_heads - self.n_ctx = n_ctx - - def forward(self, qkv): - bs, n_ctx, width = qkv.shape - attn_ch = width // self.n_heads // 3 - scale = 1 / math.sqrt(math.sqrt(attn_ch)) - qkv = qkv.view(bs, n_ctx, self.n_heads, -1) - q, k, v = th.split(qkv, attn_ch, dim=-1) - weight = th.einsum( - "bthc,bshc->bhts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - wdtype = weight.dtype - weight = th.softmax(weight.float(), dim=-1).type(wdtype) - return th.einsum("bhts,bshc->bthc", weight, v).reshape(bs, n_ctx, -1) - - -class ResidualAttentionBlock(nn.Module): - def __init__( - self, - n_ctx: int, - width: int, - heads: int, - ): - super().__init__() - - self.attn = MultiheadAttention( - n_ctx, - width, - heads, - ) - self.ln_1 = LayerNorm(width) - self.mlp = MLP(width) - self.ln_2 = LayerNorm(width) - - def forward(self, x: th.Tensor): - x = x + self.attn(self.ln_1(x)) - x = x + self.mlp(self.ln_2(x)) - return x - - -class Transformer(nn.Module): - def __init__( - self, - n_ctx: int, - width: int, - layers: int, - heads: int, - ): - super().__init__() - self.n_ctx = n_ctx - self.width = width - self.layers = layers - self.resblocks = nn.ModuleList( - [ - ResidualAttentionBlock( - n_ctx, - width, - heads, - ) - for _ in range(layers) - ] - ) - - def forward(self, x: th.Tensor): - for block in self.resblocks: - x = block(x) - return x diff --git a/spaces/vikdutt/vd/README.md b/spaces/vikdutt/vd/README.md deleted file mode 100644 index f493b15e9f5f237ba8f5d63d5fd0320b1cdaa1b3..0000000000000000000000000000000000000000 --- a/spaces/vikdutt/vd/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Vd -emoji: 💻 -colorFrom: indigo -colorTo: purple -sdk: static -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vincentclaes/DocumentQAComparator/README.md b/spaces/vincentclaes/DocumentQAComparator/README.md deleted file mode 100644 index 6beb3235e7955397fe7710915023aa1119839f37..0000000000000000000000000000000000000000 --- a/spaces/vincentclaes/DocumentQAComparator/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: DocumentQAComparator -emoji: 🤖🦾⚙️ -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -## Setup + Run -``` -pip install -r requirements.txt -python app.py -``` \ No newline at end of file diff --git a/spaces/vorstcavry/VoCh-beta/README.md b/spaces/vorstcavry/VoCh-beta/README.md deleted file mode 100644 index 
04416dd473c81c169a5b6b7ad27bd6488b0d7025..0000000000000000000000000000000000000000 --- a/spaces/vorstcavry/VoCh-beta/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: VoCh -emoji: 🎤 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: vorstcavry/VoCh ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/vumichien/Generate_human_motion/pyrender/pyrender/utils.py b/spaces/vumichien/Generate_human_motion/pyrender/pyrender/utils.py deleted file mode 100644 index 48a11faf991606ad7fb0691582f0bc6f06101a45..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/pyrender/pyrender/utils.py +++ /dev/null @@ -1,115 +0,0 @@ -import numpy as np -from PIL import Image - - -def format_color_vector(value, length): - """Format a color vector. - """ - if isinstance(value, int): - value = value / 255.0 - if isinstance(value, float): - value = np.repeat(value, length) - if isinstance(value, list) or isinstance(value, tuple): - value = np.array(value) - if isinstance(value, np.ndarray): - value = value.squeeze() - if np.issubdtype(value.dtype, np.integer): - value = (value / 255.0).astype(np.float32) - if value.ndim != 1: - raise ValueError('Format vector takes only 1-D vectors') - if length > value.shape[0]: - value = np.hstack((value, np.ones(length - value.shape[0]))) - elif length < value.shape[0]: - value = value[:length] - else: - raise ValueError('Invalid vector data type') - - return value.squeeze().astype(np.float32) - - -def format_color_array(value, shape): - """Format an array of colors. - """ - # Convert uint8 to floating - value = np.asanyarray(value) - if np.issubdtype(value.dtype, np.integer): - value = (value / 255.0).astype(np.float32) - - # Match up shapes - if value.ndim == 1: - value = np.tile(value, (shape[0],1)) - if value.shape[1] < shape[1]: - nc = shape[1] - value.shape[1] - value = np.column_stack((value, np.ones((value.shape[0], nc)))) - elif value.shape[1] > shape[1]: - value = value[:,:shape[1]] - return value.astype(np.float32) - - -def format_texture_source(texture, target_channels='RGB'): - """Format a texture as a uint8 np array. 
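 
    Accepts PIL images or numpy arrays. Float arrays are assumed to be in the
    range [0, 1] and are rescaled to uint8; channels are then repeated, sliced,
    or padded to match `target_channels`.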
- """ - - # Pass through None - if texture is None: - return None - - # Convert PIL images into numpy arrays - if isinstance(texture, Image.Image): - if texture.mode == 'P' and target_channels in ('RGB', 'RGBA'): - texture = np.array(texture.convert(target_channels)) - else: - texture = np.array(texture) - - # Format numpy arrays - if isinstance(texture, np.ndarray): - if np.issubdtype(texture.dtype, np.floating): - texture = np.array(texture * 255.0, dtype=np.uint8) - elif np.issubdtype(texture.dtype, np.integer): - texture = texture.astype(np.uint8) - else: - raise TypeError('Invalid type {} for texture'.format( - type(texture) - )) - - # Format array by picking out correct texture channels or padding - if texture.ndim == 2: - texture = texture[:,:,np.newaxis] - if target_channels == 'R': - texture = texture[:,:,0] - texture = texture.squeeze() - elif target_channels == 'RG': - if texture.shape[2] == 1: - texture = np.repeat(texture, 2, axis=2) - else: - texture = texture[:,:,(0,1)] - elif target_channels == 'GB': - if texture.shape[2] == 1: - texture = np.repeat(texture, 2, axis=2) - elif texture.shape[2] > 2: - texture = texture[:,:,(1,2)] - elif target_channels == 'RGB': - if texture.shape[2] == 1: - texture = np.repeat(texture, 3, axis=2) - elif texture.shape[2] == 2: - raise ValueError('Cannot reformat 2-channel texture into RGB') - else: - texture = texture[:,:,(0,1,2)] - elif target_channels == 'RGBA': - if texture.shape[2] == 1: - texture = np.repeat(texture, 4, axis=2) - texture[:,:,3] = 255 - elif texture.shape[2] == 2: - raise ValueError('Cannot reformat 2-channel texture into RGBA') - elif texture.shape[2] == 3: - tx = np.empty((texture.shape[0], texture.shape[1], 4), dtype=np.uint8) - tx[:,:,:3] = texture - tx[:,:,3] = 255 - texture = tx - else: - raise ValueError('Invalid texture channel specification: {}' - .format(target_channels)) - else: - raise TypeError('Invalid type {} for texture'.format(type(texture))) - - return texture diff --git a/spaces/wangguanlin/vits_Kazari/korean.py b/spaces/wangguanlin/vits_Kazari/korean.py deleted file mode 100644 index edee07429a450c55e3d8e246997faaa1e0b89cc9..0000000000000000000000000000000000000000 --- a/spaces/wangguanlin/vits_Kazari/korean.py +++ /dev/null @@ -1,210 +0,0 @@ -import re -from jamo import h2j, j2hcj -import ko_pron - - -# This is a list of Korean classifiers preceded by pure Korean numerals. 
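# For example, 마리 counts animals and 권 counts bound volumes: 두 마리 'two animals', 세 권 'three books'.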
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (ipa, lazy ipa) pairs: -_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('t͡ɕ','ʧ'), - ('d͡ʑ','ʥ'), - ('ɲ','n^'), - ('ɕ','ʃ'), - ('ʷ','w'), - ('ɭ','l`'), - ('ʎ','ɾ'), - ('ɣ','ŋ'), - ('ɰ','ɯ'), - ('ʝ','j'), - ('ʌ','ə'), - ('ɡ','g'), - ('\u031a','#'), - ('\u0348','='), - ('\u031e',''), - ('\u0320',''), - ('\u0339','') -]] - - -def latin_to_hangul(text): - for regex, replacement in _latin_to_hangul: - text = re.sub(regex, replacement, text) - return text - - -def divide_hangul(text): - text = j2hcj(h2j(text)) - for regex, replacement in _hangul_divided: - text = re.sub(regex, replacement, text) - return text - - -def hangul_number(num, sino=True): - '''Reference https://github.com/Kyubyong/g2pK''' - num = re.sub(',', '', num) - - if num == '0': - return '영' - if not sino and num == '20': - return '스무' - - digits = '123456789' - names = '일이삼사오육칠팔구' - digit2name = {d: n for d, n in zip(digits, names)} - - modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' - decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' - digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} - digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} - - spelledout = [] - for i, digit in enumerate(num): - i = len(num) - i - 1 - if sino: - if i == 0: - name = digit2name.get(digit, '') - elif i == 1: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - else: - if i == 0: - name = digit2mod.get(digit, '') - elif i == 1: - name = digit2dec.get(digit, '') - if digit == '0': - if i % 4 == 0: - last_three = spelledout[-min(3, len(spelledout)):] - if ''.join(last_three) == '': - spelledout.append('') - continue - else: - spelledout.append('') - continue - if i == 2: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 3: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 4: - name = digit2name.get(digit, '') + '만' - name = name.replace('일만', '만') - elif i == 5: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - elif i == 6: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 7: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 8: - name = digit2name.get(digit, '') + '억' - elif i == 9: - name = digit2name.get(digit, '') + '십' - elif i == 10: - name = digit2name.get(digit, '') + '백' - elif i == 11: - name = digit2name.get(digit, '') + '천' - elif i == 12: - name 
= digit2name.get(digit, '') + '조' - elif i == 13: - name = digit2name.get(digit, '') + '십' - elif i == 14: - name = digit2name.get(digit, '') + '백' - elif i == 15: - name = digit2name.get(digit, '') + '천' - spelledout.append(name) - return ''.join(elem for elem in spelledout) - - -def number_to_hangul(text): - '''Reference https://github.com/Kyubyong/g2pK''' - tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) - for token in tokens: - num, classifier = token - if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: - spelledout = hangul_number(num, sino=False) - else: - spelledout = hangul_number(num, sino=True) - text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') - # digit by digit for remaining digits - digits = '0123456789' - names = '영일이삼사오육칠팔구' - for d, n in zip(digits, names): - text = text.replace(d, n) - return text - - -def korean_to_lazy_ipa(text): - text = latin_to_hangul(text) - text = number_to_hangul(text) - text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text) - for regex, replacement in _ipa_to_lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def korean_to_ipa(text): - text = korean_to_lazy_ipa(text) - return text.replace('ʧ','tʃ').replace('ʥ','dʑ') diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/config.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/config.py deleted file mode 100644 index 908faaaaf09348594f37270a4e93b7cb6c4a9471..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/config.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -Provide configuration, singleton. -@Modified BY: mashenquan, 2023/8/28. Replace the global variable `CONFIG` with `ContextVar`. -""" -import datetime -import json -import os -from copy import deepcopy -from typing import Any -from uuid import uuid4 - -import openai -import yaml - -from metagpt.const import OPTIONS, PROJECT_ROOT, WORKSPACE_ROOT -from metagpt.logs import logger -from metagpt.tools import SearchEngineType, WebBrowserEngineType -from metagpt.utils.cost_manager import CostManager -from metagpt.utils.singleton import Singleton - - -class NotConfiguredException(Exception): - """Exception raised for errors in the configuration. 
- - Attributes: - message -- explanation of the error - """ - - def __init__(self, message="The required configuration is not set"): - self.message = message - super().__init__(self.message) - - -class Config(metaclass=Singleton): - """ - Usual Usage: - config = Config("config.yaml") - secret_key = config.get_key("MY_SECRET_KEY") - print("Secret key:", secret_key) - """ - - _instance = None - key_yaml_file = PROJECT_ROOT / "config/key.yaml" - default_yaml_file = PROJECT_ROOT / "config/config.yaml" - - def __init__(self, yaml_file=default_yaml_file): - self._init_with_config_files_and_env(yaml_file) - self.cost_manager = CostManager(**json.loads(self.COST_MANAGER)) if self.COST_MANAGER else CostManager() - - logger.info("Config loading done.") - self._update() - - def _update(self): - self.global_proxy = self._get("GLOBAL_PROXY") - self.openai_api_key = self._get("OPENAI_API_KEY") - self.anthropic_api_key = self._get("Anthropic_API_KEY") - if (not self.openai_api_key or "YOUR_API_KEY" == self.openai_api_key) and ( - not self.anthropic_api_key or "YOUR_API_KEY" == self.anthropic_api_key - ): - logger.warning("Set OPENAI_API_KEY or Anthropic_API_KEY first") - self.openai_api_base = self._get("OPENAI_API_BASE") - if not self.openai_api_base or "YOUR_API_BASE" == self.openai_api_base: - openai_proxy = self._get("OPENAI_PROXY") or self.global_proxy - if openai_proxy: - openai.proxy = openai_proxy - else: - logger.info("Set OPENAI_API_BASE in case of network issues") - self.openai_api_type = self._get("OPENAI_API_TYPE") - self.openai_api_version = self._get("OPENAI_API_VERSION") - self.openai_api_rpm = self._get("RPM", 3) - self.openai_api_model = self._get("OPENAI_API_MODEL", "gpt-4") - self.max_tokens_rsp = self._get("MAX_TOKENS", 2048) - self.deployment_id = self._get("DEPLOYMENT_ID") - - self.claude_api_key = self._get("Anthropic_API_KEY") - self.serpapi_api_key = self._get("SERPAPI_API_KEY") - self.serper_api_key = self._get("SERPER_API_KEY") - self.google_api_key = self._get("GOOGLE_API_KEY") - self.google_cse_id = self._get("GOOGLE_CSE_ID") - self.search_engine = SearchEngineType(self._get("SEARCH_ENGINE", SearchEngineType.SERPAPI_GOOGLE)) - self.web_browser_engine = WebBrowserEngineType(self._get("WEB_BROWSER_ENGINE", WebBrowserEngineType.PLAYWRIGHT)) - self.playwright_browser_type = self._get("PLAYWRIGHT_BROWSER_TYPE", "chromium") - self.selenium_browser_type = self._get("SELENIUM_BROWSER_TYPE", "chrome") - - self.long_term_memory = self._get("LONG_TERM_MEMORY", False) - if self.long_term_memory: - logger.warning("LONG_TERM_MEMORY is True") - self.cost_manager.max_budget = self._get("MAX_BUDGET", 10.0) - - self.puppeteer_config = self._get("PUPPETEER_CONFIG", "") - self.mmdc = self._get("MMDC", "mmdc") - self.calc_usage = self._get("CALC_USAGE", True) - self.model_for_researcher_summary = self._get("MODEL_FOR_RESEARCHER_SUMMARY") - self.model_for_researcher_report = self._get("MODEL_FOR_RESEARCHER_REPORT") - - workspace_uid = ( - self._get("WORKSPACE_UID") or f"{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}-{uuid4().hex[-8:]}" - ) - self.workspace = WORKSPACE_ROOT / workspace_uid - - def _init_with_config_files_and_env(self, yaml_file): - """Load configuration from config/key.yaml, config/config.yaml, and environment variables, in descending order of priority.""" - configs = dict(os.environ) - - for _yaml_file in [yaml_file, self.key_yaml_file]: - if not _yaml_file.exists(): - continue - - # Load the local YAML file - with open(_yaml_file, "r", encoding="utf-8") as file: - yaml_data = yaml.safe_load(file) - if not yaml_data: - continue - 
-                configs.update(yaml_data)
-        OPTIONS.set(configs)
-
-    @staticmethod
-    def _get(*args, **kwargs):
-        m = OPTIONS.get()
-        return m.get(*args, **kwargs)
-
-    def get(self, key, *args, **kwargs):
-        """Retrieve values from config/key.yaml, config/config.yaml, and environment variables.
-        Throw an error if not found."""
-        value = self._get(key, *args, **kwargs)
-        if value is None:
-            raise ValueError(f"Key '{key}' not found in environment variables or in the YAML file")
-        return value
-
-    def __setattr__(self, name: str, value: Any) -> None:
-        OPTIONS.get()[name] = value
-
-    def __getattr__(self, name: str) -> Any:
-        m = OPTIONS.get()
-        return m.get(name)
-
-    def set_context(self, options: dict):
-        """Update current config"""
-        if not options:
-            return
-        opts = deepcopy(OPTIONS.get())
-        opts.update(options)
-        OPTIONS.set(opts)
-        self._update()
-
-    @property
-    def options(self):
-        """Return all key-values"""
-        return OPTIONS.get()
-
-
-CONFIG = Config()
diff --git a/spaces/wilmars/cluster-app/README.md b/spaces/wilmars/cluster-app/README.md
deleted file mode 100644
index 0779da81ac28b837d39d4835508260d9b50df0f6..0000000000000000000000000000000000000000
--- a/spaces/wilmars/cluster-app/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Cluster App
-emoji: 🌍
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.35.2
-app_file: src/app.py
-pinned: false
-license: mit
----
-
-# This section is a test
\ No newline at end of file
diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/models.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/models.py
deleted file mode 100644
index 7dcd22edf811b952514080f5f06cc43d635ead28..0000000000000000000000000000000000000000
--- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/models.py
+++ /dev/null
@@ -1,542 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align  # used by SynthesizerTrn.forward (maximum_path)
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
-    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
-        super().__init__()
-        filter_channels = in_channels  # it needs to be removed in a future version
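-        # A flow-based stochastic duration predictor: it models the
-        # distribution of phoneme durations with normalizing flows,
-        # conditioned on the text-encoder output (and speaker embedding g).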
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab!=0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emotion_emb = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab!=0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - x = x + self.emotion_emb(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = 
self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = 
weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - 
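-        # Training forward pass: encode the text into the prior (enc_p) and the
-        # spectrogram into the posterior (enc_q), map the posterior through the
-        # normalizing flow, align prior and posterior with monotonic alignment
-        # search (MAS), supervise the duration predictor with the MAS durations,
-        # then decode a random slice of z into a waveform.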
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, emotion_embedding=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
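-        # Voice conversion: encode the source audio with the source speaker
-        # embedding, map it through the flow into the (approximately)
-        # speaker-independent z_p, then invert the flow with the target
-        # speaker embedding and decode the result.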
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/xfys/yolov5_tracking/yolov5/README.md b/spaces/xfys/yolov5_tracking/yolov5/README.md deleted file mode 100644 index 37f683343f53ef962472342eb856de52909a9972..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/yolov5/README.md +++ /dev/null @@ -1,495 +0,0 @@ -
-<!-- YOLOv5 banner and badges: YOLOv5 CI · YOLOv5 Citation · Docker Pulls · Run on Gradient · Open In Colab · Open In Kaggle -->
-
-[English](README.md) | [简体中文](README.zh-CN.md)
-
        - -YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. - -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! - -To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). - -
-<!-- social media icons -->
-
-## YOLOv8 🚀 NEW
        - -We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model -released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. -YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of -object detection, image segmentation and image classification tasks. - -See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: - -```commandline -pip install ultralytics -``` - -
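-The same quickstart in Python — a minimal sketch (the `yolov8n.pt` checkpoint name and `bus.jpg` image are illustrative):
-
-```python
-from ultralytics import YOLO
-
-model = YOLO("yolov8n.pt")   # load a pretrained YOLOv8 model
-results = model("bus.jpg")   # run inference on an image
-for r in results:
-    print(r.boxes.xyxy)      # detected boxes as (x1, y1, x2, y2) tensors
-```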
-
-## Documentation
        - -See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples. - -
        -Install - -Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a -[**Python>=3.7.0**](https://www.python.org/) environment, including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). - -```bash -git clone https://github.com/ultralytics/yolov5 # clone -cd yolov5 -pip install -r requirements.txt # install -``` - -
        - -
        -Inference - -YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). - -```python -import torch - -# Model -model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom - -# Images -img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list - -# Inference -results = model(img) - -# Results -results.print() # or .show(), .save(), .crop(), .pandas(), etc. -``` - -
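-The `results` object above also exposes the raw predictions — a small sketch of the documented accessors (reusing the `zidane.jpg` example; `.pandas()` assumes pandas is installed):
-
-```python
-boxes = results.xyxy[0]        # tensor of (x1, y1, x2, y2, conf, class) per detection
-df = results.pandas().xyxy[0]  # the same detections as a pandas DataFrame
-print(df[["name", "confidence"]])
-```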
        - -
        -Inference with detect.py - -`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from -the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. - -```bash -python detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - list.txt # list of images - list.streams # list of streams - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream -``` - -
        - -
        -Training - -The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) -results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) -and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the -largest `--batch-size` possible, or pass `--batch-size -1` for -YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. - -```bash -python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - - - -
        - -
        -Tutorials - -- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED -- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ -- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 -- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) -- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) -- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) -- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) -- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) -- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) -- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW -- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW - -
-
-## Integrations
-
-<!-- integration partner logos -->
-
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
-| :--: | :--: | :--: | :--: |
-| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
-
-## Ultralytics HUB
-
-Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
-
-<!-- Ultralytics HUB banner -->
-
-## Why YOLOv5
        - -YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. - -

-<!-- YOLOv5-P5 640 Figure -->
-
        - Figure Notes - -- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. -- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. -- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8. -- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` - -
-
-### Pretrained Checkpoints
-
-| Model | size (pixels) | mAP val 50-95 | mAP val 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs @640 (B) |
-| :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: |
-| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
-| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
-| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
-| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
-| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
-| | | | | | | | | |
-| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
-| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
-| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
-| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
-| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt) + [TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8 - |
-
        - Table Notes - -- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
        Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
        Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.
        Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` - -
        - -##
        Segmentation
        - -Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. - -
        - Segmentation Checkpoints - -
-
-We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
-
-| Model | size (pixels) | mAP box 50-95 | mAP mask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs @640 (B) |
-| :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: |
-| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
-| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
-| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
-| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
-| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |
-
-- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
        Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **Accuracy** values are for single-model single-scale on COCO dataset.
        Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
        Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
        Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
        - -
        - Segmentation Usage Examples  Open In Colab - -### Train - -YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. - -```bash -# Single-GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` - -### Val - -Validate YOLOv5s-seg mask mAP on COCO dataset: - -```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate -``` - -### Predict - -Use pretrained YOLOv5m-seg.pt to predict bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # load from PyTorch Hub (WARNING: inference not yet supported) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### Export - -Export YOLOv5s-seg model to ONNX and TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 -``` - -
-
-## Classification
        - -YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. - -
        - Classification Checkpoints - -
-
-We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.
-
-| Model | size (pixels) | acc top1 | acc top5 | Training 90 epochs 4xA100 (hours) | Speed ONNX CPU (ms) | Speed TensorRT V100 (ms) | params (M) | FLOPs @224 (B) |
-| :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: |
-| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
-| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
-| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
-| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
-| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
-| | | | | | | | | |
-| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
-| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
-| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
-| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
-| | | | | | | | | |
-| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
-| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
-| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
-| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
-
        - Table Notes (click to expand) - -- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
        Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 -- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
        Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` -- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
        Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
        Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` - -
        -
        - -
        - Classification Usage Examples  Open In Colab - -### Train - -YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. - -```bash -# Single-GPU -python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 -``` - -### Val - -Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: - -```bash -bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate -``` - -### Predict - -Use pretrained YOLOv5s-cls.pt to predict bus.jpg: - -```bash -python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5s-cls.pt" -) # load from PyTorch Hub -``` - -### Export - -Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: - -```bash -python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 -``` - -
-
-## Environments
        - -Get started in seconds with our verified environments. Click each icon below for details. - -
-<!-- environment icons -->
-
-## Contribute
-
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors!
-
-<!-- contributors image -->
-
-## License
-
-YOLOv5 is available under two different licenses:
-
-- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
-- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).
-
-## Contact
        - -For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions! - -
        - -[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/spaces/xinyu2/anime-remove-background/app.py b/spaces/xinyu2/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/xinyu2/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/data/custom_dataset.py b/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/data/custom_dataset.py deleted file mode 100644 index aa0a0d79a5ca7a1816a2089b82e7ef90b28c0f43..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Face_Enhancement/data/custom_dataset.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from data.pix2pix_dataset import Pix2pixDataset -from data.image_folder import make_dataset - - -class CustomDataset(Pix2pixDataset): - """ Dataset that loads images from directories - Use option --label_dir, --image_dir, --instance_dir to specify the directories. - The images in the directories are sorted in alphabetical order and paired in order. 
- """ - - @staticmethod - def modify_commandline_options(parser, is_train): - parser = Pix2pixDataset.modify_commandline_options(parser, is_train) - parser.set_defaults(preprocess_mode="resize_and_crop") - load_size = 286 if is_train else 256 - parser.set_defaults(load_size=load_size) - parser.set_defaults(crop_size=256) - parser.set_defaults(display_winsize=256) - parser.set_defaults(label_nc=13) - parser.set_defaults(contain_dontcare_label=False) - - parser.add_argument( - "--label_dir", type=str, required=True, help="path to the directory that contains label images" - ) - parser.add_argument( - "--image_dir", type=str, required=True, help="path to the directory that contains photo images" - ) - parser.add_argument( - "--instance_dir", - type=str, - default="", - help="path to the directory that contains instance maps. Leave black if not exists", - ) - return parser - - def get_paths(self, opt): - label_dir = opt.label_dir - label_paths = make_dataset(label_dir, recursive=False, read_cache=True) - - image_dir = opt.image_dir - image_paths = make_dataset(image_dir, recursive=False, read_cache=True) - - if len(opt.instance_dir) > 0: - instance_dir = opt.instance_dir - instance_paths = make_dataset(instance_dir, recursive=False, read_cache=True) - else: - instance_paths = [] - - assert len(label_paths) == len( - image_paths - ), "The #images in %s and %s do not match. Is there something wrong?" - - return label_paths, image_paths, instance_paths diff --git a/spaces/yangogo/bingo/tailwind.config.js b/spaces/yangogo/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/yangogo/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/yaoshining/text-generation-webui/docs/Audio-Notification.md b/spaces/yaoshining/text-generation-webui/docs/Audio-Notification.md deleted file 
mode 100644 index 3baa5349359257acc6f63d075c3c845adb3f5c12..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/docs/Audio-Notification.md +++ /dev/null @@ -1,14 +0,0 @@ -# Audio notification - -If your computer takes a long time to generate each response for the model that you are using, you can enable an audio notification for when the response is completed. This feature was kindly contributed by HappyWorldGames in [#1277](https://github.com/oobabooga/text-generation-webui/pull/1277). - -### Installation - -Simply place a file called "notification.mp3" in the same folder as `server.py`. Here you can find some examples: - -* https://pixabay.com/sound-effects/search/ding/?duration=0-30 -* https://pixabay.com/sound-effects/search/notification/?duration=0-30 - -Source: https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1126 - -This file will be automatically detected the next time you start the web UI. diff --git a/spaces/yaoshining/text-generation-webui/extensions/multimodal/pipeline_loader.py b/spaces/yaoshining/text-generation-webui/extensions/multimodal/pipeline_loader.py deleted file mode 100644 index 8fcd0a9b410fbc44a51941e0a87b294de871ef8b..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/extensions/multimodal/pipeline_loader.py +++ /dev/null @@ -1,52 +0,0 @@ -import traceback -from importlib import import_module -from pathlib import Path -from typing import Tuple - -from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline -from modules import shared -from modules.logging_colors import logger - - -def _get_available_pipeline_modules(): - pipeline_path = Path(__file__).parent / 'pipelines' - modules = [p for p in pipeline_path.iterdir() if p.is_dir()] - return [m.name for m in modules if (m / 'pipelines.py').exists()] - - -def load_pipeline(params: dict) -> Tuple[AbstractMultimodalPipeline, str]: - pipeline_modules = {} - available_pipeline_modules = _get_available_pipeline_modules() - for name in available_pipeline_modules: - try: - pipeline_modules[name] = import_module(f'extensions.multimodal.pipelines.{name}.pipelines') - except: - logger.warning(f'Failed to get multimodal pipelines from {name}') - logger.warning(traceback.format_exc()) - - if shared.args.multimodal_pipeline is not None: - for k in pipeline_modules: - if hasattr(pipeline_modules[k], 'get_pipeline'): - pipeline = getattr(pipeline_modules[k], 'get_pipeline')(shared.args.multimodal_pipeline, params) - if pipeline is not None: - return (pipeline, k) - else: - model_name = shared.args.model.lower() - for k in pipeline_modules: - if hasattr(pipeline_modules[k], 'get_pipeline_from_model_name'): - pipeline = getattr(pipeline_modules[k], 'get_pipeline_from_model_name')(model_name, params) - if pipeline is not None: - return (pipeline, k) - - available = [] - for k in pipeline_modules: - if hasattr(pipeline_modules[k], 'available_pipelines'): - pipelines = getattr(pipeline_modules[k], 'available_pipelines') - available += pipelines - - if shared.args.multimodal_pipeline is not None: - log = f'Multimodal - ERROR: Failed to load multimodal pipeline "{shared.args.multimodal_pipeline}", available pipelines are: {available}.' - else: - log = f'Multimodal - ERROR: Failed to determine multimodal pipeline for model {shared.args.model}, please select one manually using --multimodal-pipeline [PIPELINE]. Available pipelines are: {available}.' 
-    logger.critical(f'{log} Please specify a correct pipeline, or disable the extension')
-    raise RuntimeError(f'{log} Please specify a correct pipeline, or disable the extension')
diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/trackMute/TrackMute.ts b/spaces/yderre-aubay/midi-player-demo/src/common/trackMute/TrackMute.ts
deleted file mode 100644
index 937a4ea13b07f85743afb1d33edee88fb6d9fed2..0000000000000000000000000000000000000000
--- a/spaces/yderre-aubay/midi-player-demo/src/common/trackMute/TrackMute.ts
+++ /dev/null
@@ -1,106 +0,0 @@
-import { makeObservable, observable } from "mobx"
-import { ITrackMute } from "./ITrackMute"
-
-function updated<T>(obj: T, key: keyof T, value: any) {
-  return { ...obj, [key]: value }
-}
-
-type BoolMap = { [index: number]: boolean }
-
-/**
-  Switches between two modes depending on the operation.
-
-  ## mute mode
-
-  Simply toggles a track's output OFF/ON via mute/unmute.
-  Mute settings are kept independently of solo.
-
-  ## solo mode
-
-  Entered when any track is soloed.
-  All tracks other than the specified one are muted, but soloing
-  additional tracks unmutes them (independently of the mute-mode settings).
-
-  When every track's solo is cleared, we return to mute mode.
-*/
-export default class TrackMute implements ITrackMute {
-  private mutes: BoolMap = {}
-
-  private solos: BoolMap = {}
-
-  constructor() {
-    makeObservable(this, {
-      mutes: observable,
-      solos: observable,
-    })
-  }
-
-  reset() {
-    this.mutes = {}
-    this.solos = {}
-  }
-
-  private _setMute(trackId: number, isMute: boolean) {
-    if (this.isSoloMode()) {
-      return
-    }
-    this.mutes = updated(this.mutes, trackId, isMute)
-  }
-
-  private _getMute(trackId: number) {
-    return this.mutes[trackId] || false
-  }
-
-  private _setSolo(trackId: number, isSolo: boolean) {
-    this.solos = updated(this.solos, trackId, isSolo)
-  }
-
-  private _getSolo(trackId: number) {
-    return this.solos[trackId] || false
-  }
-
-  mute(trackId: number) {
-    this._setMute(trackId, true)
-  }
-
-  unmute(trackId: number) {
-    this._setMute(trackId, false)
-  }
-
-  solo(trackId: number) {
-    this._setSolo(trackId, true)
-  }
-
-  unsolo(trackId: number) {
-    this._setSolo(trackId, false)
-  }
-
-  isSoloMode(): boolean {
-    // If any one track is soloed, we are in solo mode
-    return Object.values(this.solos).some((s) => s)
-  }
-
-  shouldPlayTrack(trackId: number) {
-    if (this.isSoloMode()) {
-      return this._getSolo(trackId)
-    } else {
-      return !this._getMute(trackId)
-    }
-  }
-
-  // Methods for display
-
-  isSolo(trackId: number) {
-    return this.isSoloMode() && this.solos[trackId]
-  }
-
-  isMuted(trackId: number) {
-    return !this.shouldPlayTrack(trackId)
-  }
-}
diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/transform/NotePoint.ts b/spaces/yderre-aubay/midi-player-demo/src/common/transform/NotePoint.ts
deleted file mode 100644
index f546f57489afe6ff07e011d5b779d36da395750a..0000000000000000000000000000000000000000
--- a/spaces/yderre-aubay/midi-player-demo/src/common/transform/NotePoint.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-import { MaxNoteNumber } from "../../main/Constants"
-
-export interface NotePoint {
-  tick: number
-  noteNumber: number
-}
-
-export const clampNoteNumber = (noteNumber: number) =>
-  Math.min(MaxNoteNumber, Math.max(0, noteNumber))
-
-export const clampNotePoint = (point: NotePoint): NotePoint => ({
-  tick: Math.max(0, point.tick),
-  noteNumber: clampNoteNumber(point.noteNumber),
-})
diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/ControlPane/Graph/MouseHandler/handlePencilMouseDown.ts
b/spaces/yderre-aubay/midi-player-demo/src/main/components/ControlPane/Graph/MouseHandler/handlePencilMouseDown.ts deleted file mode 100644 index d6c127c8147d5401d4f87b78816e56972e59b75a..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/ControlPane/Graph/MouseHandler/handlePencilMouseDown.ts +++ /dev/null @@ -1,57 +0,0 @@ -import { IPoint, pointAdd, pointSub } from "../../../../../common/geometry" -import { - ValueEventType, - createValueEvent, -} from "../../../../../common/helpers/valueEvent" -import { ControlCoordTransform } from "../../../../../common/transform/ControlCoordTransform" -import { - createEvent as createTrackEvent, - updateValueEvents, -} from "../../../../actions" -import { pushHistory } from "../../../../actions/history" -import { getClientPos } from "../../../../helpers/mouseEvent" -import { observeDrag } from "../../../../helpers/observeDrag" -import RootStore from "../../../../stores/RootStore" - -export const handlePencilMouseDown = - (rootStore: RootStore) => - ( - e: MouseEvent, - startPoint: IPoint, - transform: ControlCoordTransform, - type: ValueEventType, - ) => { - pushHistory(rootStore)() - - rootStore.controlStore.selectedEventIds = [] - rootStore.controlStore.selection = null - rootStore.pianoRollStore.selection = null - rootStore.pianoRollStore.selectedNoteIds = [] - - const startClientPos = getClientPos(e) - const pos = transform.fromPosition(startPoint) - - const event = createValueEvent(type)(pos.value) - createTrackEvent(rootStore)(event, pos.tick) - - let lastTick = pos.tick - let lastValue = pos.value - - observeDrag({ - onMouseMove: (e) => { - const posPx = getClientPos(e) - const deltaPx = pointSub(posPx, startClientPos) - const local = pointAdd(startPoint, deltaPx) - const value = Math.max( - 0, - Math.min(transform.maxValue, transform.fromPosition(local).value), - ) - const tick = transform.getTicks(local.x) - - updateValueEvents(type)(rootStore)(lastValue, value, lastTick, tick) - - lastTick = tick - lastValue = value - }, - }) - } diff --git a/spaces/yeqingmei123/face-test/e4e/criteria/id_loss.py b/spaces/yeqingmei123/face-test/e4e/criteria/id_loss.py deleted file mode 100644 index bab806172eff18c0630536ae96817508c3197b8b..0000000000000000000000000000000000000000 --- a/spaces/yeqingmei123/face-test/e4e/criteria/id_loss.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from torch import nn -from configs.paths_config import model_paths -from models.encoders.model_irse import Backbone - - -class IDLoss(nn.Module): - def __init__(self): - super(IDLoss, self).__init__() - print('Loading ResNet ArcFace') - self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se') - self.facenet.load_state_dict(torch.load(model_paths['ir_se50'])) - self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112)) - self.facenet.eval() - for module in [self.facenet, self.face_pool]: - for param in module.parameters(): - param.requires_grad = False - - def extract_feats(self, x): - x = x[:, :, 35:223, 32:220] # Crop interesting region - x = self.face_pool(x) - x_feats = self.facenet(x) - return x_feats - - def forward(self, y_hat, y, x): - n_samples = x.shape[0] - x_feats = self.extract_feats(x) - y_feats = self.extract_feats(y) # Otherwise use the feature from there - y_hat_feats = self.extract_feats(y_hat) - y_feats = y_feats.detach() - loss = 0 - sim_improvement = 0 - id_logs = [] - count = 0 - for i in range(n_samples): - diff_target = y_hat_feats[i].dot(y_feats[i]) - diff_input = 
y_hat_feats[i].dot(x_feats[i]) - diff_views = y_feats[i].dot(x_feats[i]) - id_logs.append({'diff_target': float(diff_target), - 'diff_input': float(diff_input), - 'diff_views': float(diff_views)}) - loss += 1 - diff_target - id_diff = float(diff_target) - float(diff_views) - sim_improvement += id_diff - count += 1 - - return loss / count, sim_improvement / count, id_logs diff --git a/spaces/yuhangzang/ContextDet-Demo/app_util.py b/spaces/yuhangzang/ContextDet-Demo/app_util.py deleted file mode 100644 index 619e9aa25fbe43cd06ca5ad80b8bf6992f0f651e..0000000000000000000000000000000000000000 --- a/spaces/yuhangzang/ContextDet-Demo/app_util.py +++ /dev/null @@ -1,201 +0,0 @@ -import argparse -import io - -import matplotlib.pyplot as plt -import numpy as np -import torch -import torchvision.transforms as T -from PIL import Image - -from models.blip2_decoder import BLIP2Decoder -from models.deformable_detr.backbone import build_backbone -from models.contextdet_blip2 import ContextDET -from models.post_process import CondNMSPostProcess -from models.transformer import build_ov_transformer -from util.misc import nested_tensor_from_tensor_list - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - - parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+') - parser.add_argument('--lr_backbone', default=2e-5, type=float) - - parser.add_argument('--with_box_refine', default=True, action='store_false') - parser.add_argument('--two_stage', default=True, action='store_false') - - # * Backbone - parser.add_argument('--backbone', default='resnet50', type=str, - help="Name of the convolutional backbone to use") - parser.add_argument('--dilation', action='store_true', - help="If true, we replace stride with dilation in the last convolutional block (DC5)") - parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), - help="Type of positional embedding to use on top of the image features") - parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float, - help="position / size * scale") - parser.add_argument('--num_feature_levels', default=5, type=int, help='number of feature levels') - - # * Transformer - parser.add_argument('--enc_layers', default=6, type=int, - help="Number of encoding layers in the transformer") - parser.add_argument('--dec_layers', default=6, type=int, - help="Number of decoding layers in the transformer") - parser.add_argument('--dim_feedforward', default=2048, type=int, - help="Intermediate size of the feedforward layers in the transformer blocks") - parser.add_argument('--hidden_dim', default=256, type=int, - help="Size of the embeddings (dimension of the transformer)") - parser.add_argument('--dropout', default=0.0, type=float, - help="Dropout applied in the transformer") - parser.add_argument('--nheads', default=8, type=int, - help="Number of attention heads inside the transformer's attentions") - parser.add_argument('--num_queries', default=900, type=int, - help="Number of query slots") - parser.add_argument('--dec_n_points', default=4, type=int) - parser.add_argument('--enc_n_points', default=4, type=int) - - # * Segmentation - parser.add_argument('--masks', action='store_true', - help="Train segmentation head if the flag is provided") - - parser.add_argument('--assign_first_stage', default=True, action='store_false') - parser.add_argument('--assign_second_stage', default=True, action='store_false') - - 
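# * Model and checkpoint
-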
parser.add_argument('--name', default='ov') - parser.add_argument('--llm_name', default='bert-base-cased') - - parser.add_argument('--resume', default='', type=str) - return parser.parse_args() - - -COLORS = [ - [0.000, 0.447, 0.741], - [0.850, 0.325, 0.098], - [0.929, 0.694, 0.125], - [0.494, 0.184, 0.556], - [0.466, 0.674, 0.188], - [0.301, 0.745, 0.933] -] - - -def fig2img(fig): - buf = io.BytesIO() - fig.savefig(buf) - buf.seek(0) - img = Image.open(buf) - return img - - -def visualize_prediction(pil_img, output_dict, threshold=0.7): - keep = output_dict["scores"] > threshold - boxes = output_dict["boxes"][keep].tolist() - scores = output_dict["scores"][keep].tolist() - keep_list = keep.nonzero().squeeze(1).numpy().tolist() - labels = [output_dict["names"][i] for i in keep_list] - - plt.figure(figsize=(12.8, 8)) - plt.imshow(pil_img) - ax = plt.gca() - colors = COLORS * 100 - for score, (xmin, ymin, xmax, ymax), label, color in zip(scores, boxes, labels, colors): - ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, color=color, linewidth=3)) - ax.text(xmin, ymin, f"{label}: {score:0.2f}", fontsize=15, bbox=dict(facecolor="yellow", alpha=0.5)) - plt.axis("off") - return fig2img(plt.gcf()) - - -class ContextDetDemo(): - def __init__(self, resume): - self.transform = T.Compose([ - T.Resize(640), - T.ToTensor(), - T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) - ]) - - args = parse_args() - - args.llm_name = 'caption_coco_opt2.7b' - args.resume = resume - - args.device = 'cuda' if torch.cuda.is_available() else 'cpu' - num_classes = 2 - device = torch.device(args.device) - - backbone = build_backbone(args) - transformer = build_ov_transformer(args) - llm_decoder = BLIP2Decoder(args.llm_name) - model = ContextDET( - backbone, - transformer, - num_classes=num_classes, - num_queries=args.num_queries, - num_feature_levels=args.num_feature_levels, - aux_loss=False, - with_box_refine=args.with_box_refine, - two_stage=args.two_stage, - llm_decoder=llm_decoder, - ) - model = model.to(device) - - checkpoint = torch.load(args.resume, map_location='cpu') - missing_keys, unexpected_keys = model.load_state_dict(checkpoint['model'], strict=False) - if len(missing_keys) > 0: - print('Missing Keys: {}'.format(missing_keys)) - if len(unexpected_keys) > 0: - print('Unexpected Keys: {}'.format(unexpected_keys)) - - postprocessor = CondNMSPostProcess(args.num_queries) - - self.model = model - self.model.eval() - self.postprocessor = postprocessor - - def forward(self, image, text, task_button, history, threshold=0.3): - samples = self.transform(image).unsqueeze(0) - samples = nested_tensor_from_tensor_list(samples) - device = 'cuda' if torch.cuda.is_available() else 'cpu' - samples = samples.to(device) - vis = self.model.llm_decoder.vis_processors - - if task_button == "Question Answering": - text = f"{text} Answer:" - history.append(text) - # prompt = " ".join(history) - prompt = text - elif task_button == "Captioning": - prompt = "A photo of" - else: - prompt = text - - blip2_samples = { - 'image': vis['eval'](image)[None, :].to(device), - 'prompt': [prompt], - } - outputs = self.model(samples, blip2_samples, mask_infos=None, task_button=task_button) - - mask_infos = outputs['mask_infos_pred'] - pred_names = [list(mask_info.values()) for mask_info in mask_infos] - orig_target_sizes = torch.tensor([tuple(reversed(image.size))]).to(device) - results = self.postprocessor(outputs, orig_target_sizes, pred_names, mask_infos)[0] - image_vis = visualize_prediction(image, 
results, threshold) - - out_text = outputs['output_text'][0] - if task_button == "Cloze Test": - history = [] - chat = [ - (prompt, out_text), - ] - elif task_button == "Captioning": - history = [] - chat = [ - ("please describe the image", out_text), - ] - elif task_button == "Question Answering": - history += [out_text] - chat = [ - (history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) - ] - else: - history = [] - chat = [] - return image_vis, chat, history \ No newline at end of file diff --git a/spaces/zhenwusw/JoJoGAN/e4e/models/__init__.py b/spaces/zhenwusw/JoJoGAN/e4e/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/zhoupin30/zhoupin30/src/app/page.tsx b/spaces/zhoupin30/zhoupin30/src/app/page.tsx deleted file mode 100644 index 0dff3431b098ce4fe282cc83fc87a93a28a43090..0000000000000000000000000000000000000000 --- a/spaces/zhoupin30/zhoupin30/src/app/page.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import dynamic from 'next/dynamic' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { ssr: false } -) - -export default function IndexPage() { - return ( - <> -
-      <DynamicComponentWithNoSSR />
-    </>
-  )
-}
diff --git a/spaces/zht1/test2/app.py b/spaces/zht1/test2/app.py deleted file mode 100644 index e57faccd9e9b64eac15455e3a992800be5ce4cb0..0000000000000000000000000000000000000000 --- a/spaces/zht1/test2/app.py +++ /dev/null @@ -1,329 +0,0 @@
-import os
-
-import gradio as gr
-import numpy as np
-import torch
-from mobile_sam import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry
-from PIL import ImageDraw
-from utils.tools import box_prompt, format_results, point_prompt
-from utils.tools_gradio import fast_process
-
-# Most of our demo code is from [FastSAM Demo](https://huggingface.co/spaces/An-619/FastSAM). Huge thanks to AN-619.
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Load the pre-trained model
-sam_checkpoint = "./mobile_sam.pt"
-model_type = "vit_t"
-
-mobile_sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
-mobile_sam = mobile_sam.to(device=device)
-mobile_sam.eval()
-
-mask_generator = SamAutomaticMaskGenerator(mobile_sam)
-predictor = SamPredictor(mobile_sam)
-
-# Description
-title = "Faster Segment Anything(MobileSAM)"
        " - -description_e = """This is a demo of [Faster Segment Anything(MobileSAM) Model](https://github.com/ChaoningZhang/MobileSAM). - - We will provide box mode soon. - - Enjoy! - - """ - -description_p = """ # Instructions for point mode - - 0. Restart by click the Restart button - 1. Select a point with Add Mask for the foreground (Must) - 2. Select a point with Remove Area for the background (Optional) - 3. Click the Start Segmenting. - - """ - -examples = [ - ["assets/picture3.jpg"], - ["assets/picture4.jpg"], - ["assets/picture5.jpg"], - ["assets/picture6.jpg"], - ["assets/picture1.jpg"], - ["assets/picture2.jpg"], -] - -default_example = examples[0] - -css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }" - - -@torch.no_grad() -def segment_everything( - image, - input_size=1024, - better_quality=False, - withContours=True, - use_retina=True, - mask_random_color=True, -): - global mask_generator - - input_size = int(input_size) - w, h = image.size - scale = input_size / max(w, h) - new_w = int(w * scale) - new_h = int(h * scale) - image = image.resize((new_w, new_h)) - - nd_image = np.array(image) - annotations = mask_generator.generate(nd_image) - - fig = fast_process( - annotations=annotations, - image=image, - device=device, - scale=(1024 // input_size), - better_quality=better_quality, - mask_random_color=mask_random_color, - bbox=None, - use_retina=use_retina, - withContours=withContours, - ) - return fig - - -def segment_with_points( - image, - input_size=1024, - better_quality=False, - withContours=True, - use_retina=True, - mask_random_color=True, -): - global global_points - global global_point_label - - input_size = int(input_size) - w, h = image.size - scale = input_size / max(w, h) - new_w = int(w * scale) - new_h = int(h * scale) - image = image.resize((new_w, new_h)) - - scaled_points = np.array( - [[int(x * scale) for x in point] for point in global_points] - ) - scaled_point_label = np.array(global_point_label) - - if scaled_points.size == 0 and scaled_point_label.size == 0: - print("No points selected") - return image, image - - print(scaled_points, scaled_points is not None) - print(scaled_point_label, scaled_point_label is not None) - - nd_image = np.array(image) - predictor.set_image(nd_image) - masks, scores, logits = predictor.predict( - point_coords=scaled_points, - point_labels=scaled_point_label, - multimask_output=True, - ) - - results = format_results(masks, scores, logits, 0) - - annotations, _ = point_prompt( - results, scaled_points, scaled_point_label, new_h, new_w - ) - annotations = np.array([annotations]) - - fig = fast_process( - annotations=annotations, - image=image, - device=device, - scale=(1024 // input_size), - better_quality=better_quality, - mask_random_color=mask_random_color, - bbox=None, - use_retina=use_retina, - withContours=withContours, - ) - - global_points = [] - global_point_label = [] - # return fig, None - return fig, image - - -def get_points_with_draw(image, label, evt: gr.SelectData): - global global_points - global global_point_label - - x, y = evt.index[0], evt.index[1] - point_radius, point_color = 15, (255, 255, 0) if label == "Add Mask" else ( - 255, - 0, - 255, - ) - global_points.append([x, y]) - global_point_label.append(1 if label == "Add Mask" else 0) - - print(x, y, label == "Add Mask") - - # 创建一个可以在图像上绘图的对象 - draw = ImageDraw.Draw(image) - draw.ellipse( - [(x - point_radius, y - point_radius), (x + point_radius, y + point_radius)], - fill=point_color, - ) - 
return image - - -cond_img_e = gr.Image(label="Input", value=default_example[0], type="pil") -cond_img_p = gr.Image(label="Input with points", value=default_example[0], type="pil") - -segm_img_e = gr.Image(label="Segmented Image", interactive=False, type="pil") -segm_img_p = gr.Image( - label="Segmented Image with points", interactive=False, type="pil" -) - -global_points = [] -global_point_label = [] - -input_size_slider = gr.components.Slider( - minimum=512, - maximum=1024, - value=1024, - step=64, - label="Input_size", - info="Our model was trained on a size of 1024", -) - -with gr.Blocks(css=css, title="Faster Segment Anything(MobileSAM)") as demo: - with gr.Row(): - with gr.Column(scale=1): - # Title - gr.Markdown(title) - - # with gr.Tab("Everything mode"): - # # Images - # with gr.Row(variant="panel"): - # with gr.Column(scale=1): - # cond_img_e.render() - # - # with gr.Column(scale=1): - # segm_img_e.render() - # - # # Submit & Clear - # with gr.Row(): - # with gr.Column(): - # input_size_slider.render() - # - # with gr.Row(): - # contour_check = gr.Checkbox( - # value=True, - # label="withContours", - # info="draw the edges of the masks", - # ) - # - # with gr.Column(): - # segment_btn_e = gr.Button( - # "Segment Everything", variant="primary" - # ) - # clear_btn_e = gr.Button("Clear", variant="secondary") - # - # gr.Markdown("Try some of the examples below ⬇️") - # gr.Examples( - # examples=examples, - # inputs=[cond_img_e], - # outputs=segm_img_e, - # fn=segment_everything, - # cache_examples=True, - # examples_per_page=4, - # ) - # - # with gr.Column(): - # with gr.Accordion("Advanced options", open=False): - # # text_box = gr.Textbox(label="text prompt") - # with gr.Row(): - # mor_check = gr.Checkbox( - # value=False, - # label="better_visual_quality", - # info="better quality using morphologyEx", - # ) - # with gr.Column(): - # retina_check = gr.Checkbox( - # value=True, - # label="use_retina", - # info="draw high-resolution segmentation masks", - # ) - # # Description - # gr.Markdown(description_e) - # - with gr.Tab("Point mode"): - # Images - with gr.Row(variant="panel"): - with gr.Column(scale=1): - cond_img_p.render() - - with gr.Column(scale=1): - segm_img_p.render() - - # Submit & Clear - with gr.Row(): - with gr.Column(): - with gr.Row(): - add_or_remove = gr.Radio( - ["Add Mask", "Remove Area"], - value="Add Mask", - ) - - with gr.Column(): - segment_btn_p = gr.Button( - "Start segmenting!", variant="primary" - ) - clear_btn_p = gr.Button("Restart", variant="secondary") - - gr.Markdown("Try some of the examples below ⬇️") - gr.Examples( - examples=examples, - inputs=[cond_img_p], - # outputs=segm_img_p, - # fn=segment_with_points, - # cache_examples=True, - examples_per_page=4, - ) - - with gr.Column(): - # Description - gr.Markdown(description_p) - - cond_img_p.select(get_points_with_draw, [cond_img_p, add_or_remove], cond_img_p) - - # segment_btn_e.click( - # segment_everything, - # inputs=[ - # cond_img_e, - # input_size_slider, - # mor_check, - # contour_check, - # retina_check, - # ], - # outputs=segm_img_e, - # ) - - segment_btn_p.click( - segment_with_points, inputs=[cond_img_p], outputs=[segm_img_p, cond_img_p] - ) - - def clear(): - return None, None - - def clear_text(): - return None, None, None - - # clear_btn_e.click(clear, outputs=[cond_img_e, segm_img_e]) - clear_btn_p.click(clear, outputs=[cond_img_p, segm_img_p]) - -demo.queue() -demo.launch() diff --git a/spaces/zideliu/styledrop/timm/utils/distributed.py 
b/spaces/zideliu/styledrop/timm/utils/distributed.py deleted file mode 100644 index 3c5dba8c1de5a6ff53638207521377fdfbc4f239..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/utils/distributed.py +++ /dev/null @@ -1,28 +0,0 @@ -""" Distributed training/validation utils - -Hacked together by / Copyright 2020 Ross Wightman -""" -import torch -from torch import distributed as dist - -from .model import unwrap_model - - -def reduce_tensor(tensor, n): - rt = tensor.clone() - dist.all_reduce(rt, op=dist.ReduceOp.SUM) - rt /= n - return rt - - -def distribute_bn(model, world_size, reduce=False): - # ensure every node has the same running bn stats - for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): - if ('running_mean' in bn_name) or ('running_var' in bn_name): - if reduce: - # average bn stats across whole group - torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) - bn_buf /= float(world_size) - else: - # broadcast bn stats from rank 0 to whole group - torch.distributed.broadcast(bn_buf, 0) diff --git a/spaces/ziguo/Real-ESRGAN/realesrgan/archs/__init__.py b/spaces/ziguo/Real-ESRGAN/realesrgan/archs/__init__.py deleted file mode 100644 index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000 --- a/spaces/ziguo/Real-ESRGAN/realesrgan/archs/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import arch modules for registry -# scan all the files that end with '_arch.py' under the archs folder -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/spaces/zihan0516/B1/README.md b/spaces/zihan0516/B1/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/zihan0516/B1/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe easy.
-
-It closely reproduces the main features of the New Bing web UI, works from mainland China, supports most Microsoft Bing AI features, and can be self-hosted.
-
-![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars)
-![Github issues](https://img.shields.io/github/issues/weaigc/bingo)
-[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license)
-
-For bug reports and feedback, please visit https://github.com/weaigc/bingo/issues
-
        - - diff --git a/spaces/zxy666/bingo-chatai666/src/components/chat-list.tsx b/spaces/zxy666/bingo-chatai666/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
-      <div>
-        {messages.map((message, index) => (
-          <React.Fragment key={index}>
-            <ChatMessage message={message} />
-            {index < messages.length - 1 && (
-              <Separator />
-            )}
-          </React.Fragment>
-        ))}
-      </div>
-  )
-}
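For reference, a minimal usage sketch of the `ChatList` component above. The message literals are hypothetical, and the exact fields of `ChatMessageModel` (defined in `src/lib/bots/bing/types`) are assumed here rather than taken from the source.

```tsx
// Hypothetical usage of ChatList. The message shape below is an assumption
// for illustration; the real ChatMessageModel lives in src/lib/bots/bing/types.
import { ChatList } from '@/components/chat-list'
import type { ChatMessageModel } from '@/lib/bots/bing/types'

const messages = [
  { id: '1', author: 'user', text: 'Hello Bing' },
  { id: '2', author: 'bot', text: 'Hi! How can I help you today?' },
] as unknown as ChatMessageModel[] // cast because the real fields are assumed

export default function ExamplePage() {
  // ChatList renders nothing for an empty array; otherwise it renders one
  // ChatMessage per entry, with a Separator between consecutive messages.
  return <ChatList messages={messages} />
}
```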